// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

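/*
 * Helpers for the "exact pincount" scheme: for compound pages that are large
 * enough (see hpage_pincount_available()), FOLL_PIN pins are tracked in a
 * dedicated compound_pincount field of the head page, instead of via the
 * GUP_PIN_COUNTING_BIAS refcount scheme used for smaller pages.
 */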
static void hpage_pincount_add(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_sub(refs, compound_pincount_ptr(page));
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);

	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;
	return head;
}

/*
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * "grab" names in this file mean "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior when incrementing the page's refcount".
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: page's refcount will be incremented by 1.
 *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
static __maybe_unused struct page *try_grab_compound_head(struct page *page,
							  int refs,
							  unsigned int flags)
{
	if (flags & FOLL_GET)
		return try_get_compound_head(page, refs);
	else if (flags & FOLL_PIN) {
		int orig_refs = refs;

		/*
		 * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
		 * path, so fail and let the caller fall back to the slow path.
		 */
		if (unlikely(flags & FOLL_LONGTERM) &&
				is_migrate_cma_page(page))
			return NULL;

		/*
		 * When pinning a compound page of order > 1 (which is what
		 * hpage_pincount_available() checks for), use an exact count to
		 * track it, via hpage_pincount_add/_sub().
		 *
		 * However, be sure to *also* increment the normal page refcount
		 * field at least once, so that the page really is pinned.
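		 *
		 * Worked example (a sketch, assuming the default
		 * GUP_PIN_COUNTING_BIAS of 1024): pinning with refs == 3
		 * adds 3 * 1024 == 3072 to the refcount of a page without
		 * exact pin counts, whereas a large enough compound page
		 * instead gets refcount += 3 and compound_pincount += 3.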
		 */
		if (!hpage_pincount_available(page))
			refs *= GUP_PIN_COUNTING_BIAS;

		page = try_get_compound_head(page, refs);
		if (!page)
			return NULL;

		if (hpage_pincount_available(page))
			hpage_pincount_add(page, refs);

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
				    orig_refs);

		return page;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior when incrementing the page's refcount".
 *
 * @page:    pointer to page to be grabbed
 * @flags:   gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases:
 *
 *    FOLL_GET: page's refcount will be incremented by 1.
 *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));

	if (flags & FOLL_GET)
		return try_get_page(page);
	else if (flags & FOLL_PIN) {
		int refs = 1;

		page = compound_head(page);

		if (WARN_ON_ONCE(page_ref_count(page) <= 0))
			return false;

		if (hpage_pincount_available(page))
			hpage_pincount_add(page, 1);
		else
			refs = GUP_PIN_COUNTING_BIAS;

		/*
		 * Similar to try_grab_compound_head(): even if using the
		 * hpage_pincount_add/_sub() routines, be sure to
		 * *also* increment the normal page refcount field at least
		 * once, so that the page really is pinned.
		 */
		page_ref_add(page, refs);

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
	}

	return true;
}

#ifdef CONFIG_DEV_PAGEMAP_OPS
static bool __unpin_devmap_managed_user_page(struct page *page)
{
	int count, refs = 1;

	if (!page_is_devmap_managed(page))
		return false;

	if (hpage_pincount_available(page))
		hpage_pincount_sub(page, 1);
	else
		refs = GUP_PIN_COUNTING_BIAS;

	count = page_ref_sub_return(page, refs);

	mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1);
	/*
	 * devmap page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (count == 1)
		free_devmap_managed_page(page);
	else if (!count)
		__put_page(page);

	return true;
}
#else
static bool __unpin_devmap_managed_user_page(struct page *page)
{
	return false;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

/**
 * unpin_user_page() - release a dma-pinned page
 * @page:            pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
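 *
 * A minimal usage sketch (illustrative only, not taken from any real caller;
 * error handling and the actual DMA are elided):
 *
 *	struct page *page;
 *
 *	if (pin_user_pages_fast(user_addr, 1, FOLL_WRITE, &page) == 1) {
 *		... hand the page to the device for DMA ...
 *		unpin_user_page(page);
 *	}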
 */
void unpin_user_page(struct page *page)
{
	int refs = 1;

	page = compound_head(page);

	/*
	 * For devmap managed pages we need to catch the refcount transition
	 * from GUP_PIN_COUNTING_BIAS to 1: when the refcount reaches one, the
	 * page is free and we need to inform the device driver through a
	 * callback. See include/linux/memremap.h and HMM for details.
	 */
	if (__unpin_devmap_managed_user_page(page))
		return;

	if (hpage_pincount_available(page))
		hpage_pincount_sub(page, 1);
	else
		refs = GUP_PIN_COUNTING_BIAS;

	if (page_ref_sub_and_test(page, refs))
		__put_page(page);

	mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1);
}
EXPORT_SYMBOL(unpin_user_page);

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty(), unpin_user_page().
 *
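 * A usage sketch (illustrative only; assumes the pages were pinned with
 * pin_user_pages() and that a device then wrote into them):
 *
 *	pin_user_pages(addr, npages, FOLL_WRITE, pages, NULL);
 *	... let the device DMA into the pages ...
 *	unpin_user_pages_dirty_lock(pages, npages, true);
 *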
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long index;

	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	for (index = 0; index < npages; index++) {
		struct page *page = compound_head(pages[index]);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!PageDirty(page))
			set_page_dirty_lock(page);
		unpin_user_page(page);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;

	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */
	for (index = 0; index < npages; index++)
		unpin_user_page(pages[index]);
}
EXPORT_SYMBOL(unpin_user_pages);

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE or a forced COW break can write even to unwritable PTEs,
 * but only after we've gone through a COW cycle and the PTEs are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
}

/*
 * A (separate) COW fault might break the page the other way and
 * get_user_pages() would return the page from what is now the wrong
 * VM. So we need to force a COW break at GUP time even for reads.
 */
static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
{
	return is_cow_mapping(vma->vm_flags) && (flags & (FOLL_GET | FOLL_PIN));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case).  Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_sem is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else if (flags & FOLL_SPLIT) {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
			}
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		} else {  /* flags & FOLL_SPLIT_PMD */
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

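/*
 * Convenience wrapper around follow_page_mask() for callers that do not need
 * @ctx: any dev_pagemap reference taken during the lookup is dropped before
 * returning, and the computed page_mask is discarded.
 */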
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	if (unlikely(!try_get_page(*page))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_sem may be released.  If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (locked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also want to write to the page just obtained, which a
	 * read fault here might prevent (a read-only page might get re-COWed
	 * by a userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

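/*
 * Check whether @vma permits the access implied by @gup_flags: returns 0 if
 * the access is allowed, or -EFAULT if not.
 */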
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	whether we're still with the mmap_sem held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_sem is
 * released by an up_read().  That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
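 *
 * A typical caller goes through one of those wrappers; a minimal sketch
 * (illustrative only, error handling elided):
 *
 *	struct page *pages[16];
 *	int i, nr;
 *
 *	nr = get_user_pages_fast(start, 16, FOLL_WRITE, pages);
 *	for (i = 0; i < nr; i++) {
 *		... access the page contents ...
 *		put_page(pages[i]);
 *	}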
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr(start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags)) {
				ret = -EFAULT;
				goto out;
			}
			if (is_vm_hugetlb_page(vma)) {
				if (should_force_cow_break(vma, foll_flags))
					foll_flags |= FOLL_WRITE;
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						foll_flags, locked);
				if (locked && *locked == 0) {
					/*
					 * We've got a VM_FAULT_RETRY
					 * and we've lost mmap_sem.
					 * We must stop here.
					 */
					BUG_ON(gup_flags & FOLL_NOWAIT);
					BUG_ON(ret != 0);
					goto out;
				}
				continue;
			}
		}

		if (should_force_cow_break(vma, foll_flags))
			foll_flags |= FOLL_WRITE;

retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(tsk, vma, start, &foll_flags,
					   locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
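		/*
		 * Step over all remaining subpages of a compound page in one
		 * go: ctx.page_mask is (nr_subpages - 1) for a huge page and
		 * 0 otherwise, so the expression below yields the number of
		 * pages from @start to the end of the current page.
		 */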
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry. If NULL, the caller must guarantee
 *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 * This function will not return with an unlocked mmap_sem. So it does not
 * have the same semantics wrt the @mm->mmap_sem as filemap_fault() does.
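 *
 * A retry sketch, loosely modelled on the futex code (illustrative only;
 * note that mmap_sem must be held around the call):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, address, FAULT_FLAG_WRITE, NULL);
 *	up_read(&mm->mmap_sem);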
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	address = untagged_addr(address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;
		lock_dropped = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
		 * by fatal signals, so we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */

		if (fatal_signal_pending(current)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = down_read_killable(&mm->mmap_sem);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, locked);
		if (!*locked) {
			/* Continue to retry until we succeed */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_sem is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 * Return: number of pages faulted in on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma_is_accessible(vma))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, locked);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
#else /* CONFIG_MMU */
static long __get_user_pages_locked(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		struct vm_area_struct **vmas, int *locked,
		unsigned int foll_flags)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				get_page(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
#endif /* !CONFIG_MMU */

#if defined(CONFIG_FS_DAX) || defined(CONFIG_CMA)
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
{
	long i;
	struct vm_area_struct *vma_prev = NULL;

	for (i = 0; i < nr_pages; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			return true;
	}
	return false;
}

#ifdef CONFIG_CMA
static struct page *new_non_cma_page(struct page *page, unsigned long private)
{
	/*
	 * We want to make sure we allocate the new page from the same node
	 * as the source page.
	 */
	int nid = page_to_nid(page);
	/*
	 * Trying to allocate a page for migration. Ignore allocation
	 * failure warnings. We don't force __GFP_THISNODE here because
	 * this node is where the CMA reservation lives, and in some cases
	 * such nodes have very little non-movable memory to allocate from.
	 */
	gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(page)) {
		struct hstate *h = page_hstate(page);
		/*
		 * We don't want to dequeue from the pool because pool pages will
		 * mostly be from the CMA region.
		 */
		return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
	}
#endif
	if (PageTransHuge(page)) {
		struct page *thp;
		/*
		 * ignore allocation failure warnings
		 */
		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;

		/*
		 * Remove the movable mask so that we don't allocate from
		 * CMA area again.
		 */
		thp_gfpmask &= ~__GFP_MOVABLE;
		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}

	return __alloc_pages_node(nid, gfp_mask, 0);
}

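/*
 * Before a long-term pin is established, migrate any of @pages that reside
 * in the CMA area to freshly allocated non-CMA pages (allocated via
 * new_non_cma_page()), then take the page references again. Returns the
 * number of pages that can now be pinned long term, or a negative error code.
 */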
static long check_and_migrate_cma_pages(struct task_struct *tsk,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long nr_pages,
					struct page **pages,
					struct vm_area_struct **vmas,
					unsigned int gup_flags)
{
	unsigned long i;
	unsigned long step;
	bool drain_allow = true;
	bool migrate_allow = true;
	LIST_HEAD(cma_page_list);
	long ret = nr_pages;

check_again:
	for (i = 0; i < nr_pages;) {

		struct page *head = compound_head(pages[i]);

		/*
		 * gup may start from a tail page. Advance the step by the
		 * number of subpages left in this compound page.
		 */
		step = compound_nr(head) - (pages[i] - head);
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible.
		 */
		if (is_migrate_cma_page(head)) {
			if (PageHuge(head))
				isolate_huge_page(head, &cma_page_list);
			else {
				if (!PageLRU(head) && drain_allow) {
					lru_add_drain_all();
					drain_allow = false;
				}

				if (!isolate_lru_page(head)) {
					list_add_tail(&head->lru, &cma_page_list);
					mod_node_page_state(page_pgdat(head),
							    NR_ISOLATED_ANON +
							    page_is_file_lru(head),
							    hpage_nr_pages(head));
				}
			}
		}

		i += step;
	}

	if (!list_empty(&cma_page_list)) {
		/*
		 * drop the above get_user_pages reference.
		 */
		for (i = 0; i < nr_pages; i++)
			put_page(pages[i]);

		if (migrate_pages(&cma_page_list, new_non_cma_page,
				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
			/*
			 * Some of the pages failed migration. Redo the
			 * get_user_pages without migration.
			 */
			migrate_allow = false;

			if (!list_empty(&cma_page_list))
				putback_movable_pages(&cma_page_list);
		}
		/*
		 * We migrated all the pages we could. Try to take the page
		 * references again, migrating any new CMA pages which we
		 * failed to isolate earlier.
		 */
1733 		ret = __get_user_pages_locked(tsk, mm, start, nr_pages,
1734 						   pages, vmas, NULL,
1735 						   gup_flags);
1736 
1737 		if ((ret > 0) && migrate_allow) {
1738 			nr_pages = ret;
1739 			drain_allow = true;
1740 			goto check_again;
1741 		}
1742 	}
1743 
1744 	return ret;
1745 }
1746 #else
1747 static long check_and_migrate_cma_pages(struct task_struct *tsk,
1748 					struct mm_struct *mm,
1749 					unsigned long start,
1750 					unsigned long nr_pages,
1751 					struct page **pages,
1752 					struct vm_area_struct **vmas,
1753 					unsigned int gup_flags)
1754 {
1755 	return nr_pages;
1756 }
1757 #endif /* CONFIG_CMA */
1758 
1759 /*
1760  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1761  * allows us to process the FOLL_LONGTERM flag.
1762  */
1763 static long __gup_longterm_locked(struct task_struct *tsk,
1764 				  struct mm_struct *mm,
1765 				  unsigned long start,
1766 				  unsigned long nr_pages,
1767 				  struct page **pages,
1768 				  struct vm_area_struct **vmas,
1769 				  unsigned int gup_flags)
1770 {
1771 	struct vm_area_struct **vmas_tmp = vmas;
1772 	unsigned long flags = 0;
1773 	long rc, i;
1774 
1775 	if (gup_flags & FOLL_LONGTERM) {
1776 		if (!pages)
1777 			return -EINVAL;
1778 
1779 		if (!vmas_tmp) {
1780 			vmas_tmp = kcalloc(nr_pages,
1781 					   sizeof(struct vm_area_struct *),
1782 					   GFP_KERNEL);
1783 			if (!vmas_tmp)
1784 				return -ENOMEM;
1785 		}
1786 		flags = memalloc_nocma_save();
1787 	}
1788 
1789 	rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
1790 				     vmas_tmp, NULL, gup_flags);
1791 
1792 	if (gup_flags & FOLL_LONGTERM) {
1793 		memalloc_nocma_restore(flags);
1794 		if (rc < 0)
1795 			goto out;
1796 
1797 		if (check_dax_vmas(vmas_tmp, rc)) {
1798 			for (i = 0; i < rc; i++)
1799 				put_page(pages[i]);
1800 			rc = -EOPNOTSUPP;
1801 			goto out;
1802 		}
1803 
1804 		rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
1805 						 vmas_tmp, gup_flags);
1806 	}
1807 
1808 out:
1809 	if (vmas_tmp != vmas)
1810 		kfree(vmas_tmp);
1811 	return rc;
1812 }
1813 #else /* !CONFIG_FS_DAX && !CONFIG_CMA */
1814 static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
1815 						  struct mm_struct *mm,
1816 						  unsigned long start,
1817 						  unsigned long nr_pages,
1818 						  struct page **pages,
1819 						  struct vm_area_struct **vmas,
1820 						  unsigned int flags)
1821 {
1822 	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
1823 				       NULL, flags);
1824 }
1825 #endif /* CONFIG_FS_DAX || CONFIG_CMA */
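
/*
 * Example: an illustrative sketch (the helper below is hypothetical, not
 * part of this file) of the FOLL_LONGTERM path above: a caller that keeps
 * pages pinned indefinitely requests FOLL_LONGTERM, so DAX vmas are
 * rejected and CMA pages are migrated out before the pin is granted.
 */
static __maybe_unused long longterm_pin_example(unsigned long uaddr,
						unsigned long nr_pages,
						struct page **pages)
{
	long got;

	down_read(&current->mm->mmap_sem);
	got = pin_user_pages(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			     pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (got < 0)
		return got;

	/* ... use the pages for long-lived DMA ... */

	unpin_user_pages(pages, got);
	return got;
}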
1826 
1827 #ifdef CONFIG_MMU
1828 static long __get_user_pages_remote(struct task_struct *tsk,
1829 				    struct mm_struct *mm,
1830 				    unsigned long start, unsigned long nr_pages,
1831 				    unsigned int gup_flags, struct page **pages,
1832 				    struct vm_area_struct **vmas, int *locked)
1833 {
1834 	/*
1835 	 * Parts of FOLL_LONGTERM behavior are incompatible with
1836 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1837 	 * vmas. However, this only comes up if locked is set, and there are
1838 	 * callers that do request FOLL_LONGTERM, but do not set locked. So,
1839 	 * allow what we can.
1840 	 */
1841 	if (gup_flags & FOLL_LONGTERM) {
1842 		if (WARN_ON_ONCE(locked))
1843 			return -EINVAL;
1844 		/*
1845 		 * This will check the vmas (even if our vmas arg is NULL)
1846 		 * and return -EOPNOTSUPP if DAX isn't allowed in this case:
1847 		 */
1848 		return __gup_longterm_locked(tsk, mm, start, nr_pages, pages,
1849 					     vmas, gup_flags | FOLL_TOUCH |
1850 					     FOLL_REMOTE);
1851 	}
1852 
1853 	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
1854 				       locked,
1855 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1856 }
1857 
1858 /**
1859  * get_user_pages_remote() - pin user pages in memory
1860  * @tsk:	the task_struct to use for page fault accounting, or
1861  *		NULL if faults are not to be recorded.
1862  * @mm:		mm_struct of target mm
1863  * @start:	starting user address
1864  * @nr_pages:	number of pages from start to pin
1865  * @gup_flags:	flags modifying lookup behaviour
1866  * @pages:	array that receives pointers to the pages pinned.
1867  *		Should be at least nr_pages long. Or NULL, if caller
1868  *		only intends to ensure the pages are faulted in.
1869  * @vmas:	array of pointers to vmas corresponding to each page.
1870  *		Or NULL if the caller does not require them.
1871  * @locked:	pointer to lock flag indicating whether lock is held and
1872  *		subsequently whether VM_FAULT_RETRY functionality can be
1873  *		utilised. Lock must initially be held.
1874  *
1875  * Returns either number of pages pinned (which may be less than the
1876  * number requested), or an error. Details about the return value:
1877  *
1878  * -- If nr_pages is 0, returns 0.
1879  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1880  * -- If nr_pages is >0, and some pages were pinned, returns the number of
1881  *    pages pinned. Again, this may be less than nr_pages.
1882  *
1883  * The caller is responsible for releasing returned @pages, via put_page().
1884  *
1885  * @vmas are valid only as long as mmap_sem is held.
1886  *
1887  * Must be called with mmap_sem held for read or write.
1888  *
1889  * get_user_pages_remote walks a process's page tables and takes a reference
1890  * to each struct page that each user address corresponds to at a given
1891  * instant. That is, it takes the page that would be accessed if a user
1892  * thread accesses the given user virtual address at that instant.
1893  *
1894  * This does not guarantee that the page exists in the user mappings when
1895  * get_user_pages_remote returns, and there may even be a completely different
1896  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1897  * and subsequently refaulted). However, it does guarantee that the page
1898  * won't be freed completely. And mostly callers simply care that the page
1899  * contains data that was valid *at some point in time*. Typically, an IO
1900  * or similar operation cannot guarantee anything stronger anyway because
1901  * locks can't be held over the syscall boundary.
1902  *
1903  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1904  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1905  * be called after the page is finished with, and before put_page is called.
1906  *
1907  * get_user_pages_remote is typically used for fewer-copy IO operations,
1908  * to get a handle on the memory by some means other than accesses
1909  * via the user virtual addresses. The pages may be submitted for
1910  * DMA to devices or accessed via their kernel linear mapping (via the
1911  * kmap APIs). Care should be taken to use the correct cache flushing APIs.
1912  *
1913  * See also get_user_pages_fast, for performance critical applications.
1914  *
1915  * get_user_pages_remote should be phased out in favor of
1916  * get_user_pages_locked|unlocked or get_user_pages_fast: without a
1917  * non-NULL @locked argument, it cannot pass FAULT_FLAG_ALLOW_RETRY
1918  * to handle_mm_fault.
1919  */
1920 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1921 		unsigned long start, unsigned long nr_pages,
1922 		unsigned int gup_flags, struct page **pages,
1923 		struct vm_area_struct **vmas, int *locked)
1924 {
1925 	/*
1926 	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1927 	 * never directly by the caller, so enforce that with an assertion:
1928 	 */
1929 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1930 		return -EINVAL;
1931 
1932 	return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
1933 				       pages, vmas, locked);
1934 }
1935 EXPORT_SYMBOL(get_user_pages_remote);
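
/*
 * Example: an illustrative sketch (hypothetical helper) of remote gup as
 * used for ptrace-style access: @mm belongs to another task, and that mm's
 * mmap_sem must be held across the call, as documented above.
 */
static __maybe_unused long remote_gup_example(struct task_struct *tsk,
					      struct mm_struct *mm,
					      unsigned long uaddr,
					      struct page **page)
{
	long got;

	down_read(&mm->mmap_sem);
	got = get_user_pages_remote(tsk, mm, uaddr, 1, FOLL_WRITE, page,
				    NULL, NULL);
	up_read(&mm->mmap_sem);

	if (got == 1) {
		/* ... access the page, then follow the release protocol: */
		set_page_dirty_lock(*page);
		put_page(*page);
	}
	return got;
}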
1936 
1937 #else /* CONFIG_MMU */
1938 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1939 			   unsigned long start, unsigned long nr_pages,
1940 			   unsigned int gup_flags, struct page **pages,
1941 			   struct vm_area_struct **vmas, int *locked)
1942 {
1943 	return 0;
1944 }
1945 
1946 static long __get_user_pages_remote(struct task_struct *tsk,
1947 				    struct mm_struct *mm,
1948 				    unsigned long start, unsigned long nr_pages,
1949 				    unsigned int gup_flags, struct page **pages,
1950 				    struct vm_area_struct **vmas, int *locked)
1951 {
1952 	return 0;
1953 }
1954 #endif /* !CONFIG_MMU */
1955 
1956 /**
1957  * get_user_pages() - pin user pages in memory
1958  * @start:      starting user address
1959  * @nr_pages:   number of pages from start to pin
1960  * @gup_flags:  flags modifying lookup behaviour
1961  * @pages:      array that receives pointers to the pages pinned.
1962  *              Should be at least nr_pages long. Or NULL, if caller
1963  *              only intends to ensure the pages are faulted in.
1964  * @vmas:       array of pointers to vmas corresponding to each page.
1965  *              Or NULL if the caller does not require them.
1966  *
1967  * This is the same as get_user_pages_remote(), just with a
1968  * less-flexible calling convention where we assume that the task
1969  * and mm being operated on are the current task's and don't allow
1970  * passing of a locked parameter.  We also obviously don't pass
1971  * FOLL_REMOTE in here.
1972  */
1973 long get_user_pages(unsigned long start, unsigned long nr_pages,
1974 		unsigned int gup_flags, struct page **pages,
1975 		struct vm_area_struct **vmas)
1976 {
1977 	/*
1978 	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1979 	 * never directly by the caller, so enforce that with an assertion:
1980 	 */
1981 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1982 		return -EINVAL;
1983 
1984 	return __gup_longterm_locked(current, current->mm, start, nr_pages,
1985 				     pages, vmas, gup_flags | FOLL_TOUCH);
1986 }
1987 EXPORT_SYMBOL(get_user_pages);
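
/*
 * Example: an illustrative sketch (hypothetical helper) of the classic
 * FOLL_GET lifecycle described above: pin, modify, set_page_dirty_lock(),
 * then put_page(), in that order.
 */
static __maybe_unused long gup_write_example(unsigned long uaddr,
					     unsigned long nr_pages,
					     struct page **pages)
{
	long i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(uaddr, nr_pages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (got <= 0)
		return got;

	/* ... write to the pages, e.g. via DMA or a kernel mapping ... */

	for (i = 0; i < got; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return got;
}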
1988 
1989 /**
1990  * get_user_pages_locked() is suitable to replace the form:
1991  *
1992  *      down_read(&mm->mmap_sem);
1993  *      do_something()
1994  *      get_user_pages(..., pages, NULL);
1995  *      up_read(&mm->mmap_sem);
1996  *
1997  *  to:
1998  *
1999  *      int locked = 1;
2000  *      down_read(&mm->mmap_sem);
2001  *      do_something()
2002  *      get_user_pages_locked(..., pages, &locked);
2003  *      if (locked)
2004  *          up_read(&mm->mmap_sem);
2005  *
2006  * @start:      starting user address
2007  * @nr_pages:   number of pages from start to pin
2008  * @gup_flags:  flags modifying lookup behaviour
2009  * @pages:      array that receives pointers to the pages pinned.
2010  *              Should be at least nr_pages long. Or NULL, if caller
2011  *              only intends to ensure the pages are faulted in.
2012  * @locked:     pointer to lock flag indicating whether lock is held and
2013  *              subsequently whether VM_FAULT_RETRY functionality can be
2014  *              utilised. Lock must initially be held.
2015  *
2016  * We can leverage the VM_FAULT_RETRY functionality in the page fault
2017  * paths better by using either get_user_pages_locked() or
2018  * get_user_pages_unlocked().
2019  *
2020  */
2021 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
2022 			   unsigned int gup_flags, struct page **pages,
2023 			   int *locked)
2024 {
2025 	/*
2026 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2027 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2028 	 * vmas.  As there are no users of this flag in this call we simply
2029 	 * disallow this option for now.
2030 	 */
2031 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2032 		return -EINVAL;
2033 
2034 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
2035 				       pages, NULL, locked,
2036 				       gup_flags | FOLL_TOUCH);
2037 }
2038 EXPORT_SYMBOL(get_user_pages_locked);
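
/*
 * Example: an illustrative sketch (hypothetical helper) of the pattern in
 * the comment above. If the fault handler had to drop mmap_sem in order
 * to retry, *locked is cleared and the caller must not unlock again.
 */
static __maybe_unused long gup_locked_example(unsigned long uaddr,
					      struct page **page)
{
	int locked = 1;
	long got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages_locked(uaddr, 1, FOLL_WRITE, page, &locked);
	if (locked)
		up_read(&current->mm->mmap_sem);

	if (got == 1)
		put_page(*page);
	return got;
}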
2039 
2040 /*
2041  * get_user_pages_unlocked() is suitable to replace the form:
2042  *
2043  *      down_read(&mm->mmap_sem);
2044  *      get_user_pages(..., pages, NULL);
2045  *      up_read(&mm->mmap_sem);
2046  *
2047  *  with:
2048  *
2049  *      get_user_pages_unlocked(..., pages);
2050  *
2051  * It is functionally equivalent to get_user_pages_fast, so
2052  * get_user_pages_fast should be used instead if specific gup_flags
2053  * (e.g. FOLL_FORCE) are not required.
2054  */
2055 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2056 			     struct page **pages, unsigned int gup_flags)
2057 {
2058 	struct mm_struct *mm = current->mm;
2059 	int locked = 1;
2060 	long ret;
2061 
2062 	/*
2063 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2064 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2065 	 * vmas.  As there are no users of this flag in this call we simply
2066 	 * disallow this option for now.
2067 	 */
2068 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2069 		return -EINVAL;
2070 
2071 	down_read(&mm->mmap_sem);
2072 	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
2073 				      &locked, gup_flags | FOLL_TOUCH);
2074 	if (locked)
2075 		up_read(&mm->mmap_sem);
2076 	return ret;
2077 }
2078 EXPORT_SYMBOL(get_user_pages_unlocked);
2079 
2080 /*
2081  * Fast GUP
2082  *
2083  * get_user_pages_fast attempts to pin user pages by walking the page
2084  * tables directly and avoids taking locks. Thus the walker needs to be
2085  * protected from page table pages being freed from under it, and should
2086  * block any THP splits.
2087  *
2088  * One way to achieve this is to have the walker disable interrupts, and
2089  * rely on IPIs from the TLB flushing code blocking before the page table
2090  * pages are freed. This is unsuitable for architectures that do not need
2091  * to broadcast an IPI when invalidating TLBs.
2092  *
2093  * Another way to achieve this is to batch up the pages that contain page
2094  * tables belonging to more than one mm_user, then free those pages via an
2095  * rcu_sched callback. Disabling interrupts allows the fast_gup walker to
2096  * block both the rcu_sched callback and the IPI broadcast for splitting THPs
2097  * (which is a relatively rare event). The code below adopts this strategy.
2098  *
2099  * Before activating this code, please be aware that the following assumptions
2100  * are currently made:
2101  *
2102  *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2103  *  free pages containing page tables, or TLB flushing requires IPI broadcast.
2104  *
2105  *  *) ptes can be read atomically by the architecture.
2106  *
2107  *  *) access_ok is sufficient to validate userspace address ranges.
2108  *
2109  * The last two assumptions can be relaxed by the addition of helper functions.
2110  *
2111  * This code is based heavily on the PowerPC implementation by Nick Piggin.
2112  */
2113 #ifdef CONFIG_HAVE_FAST_GUP
2114 
2115 static void put_compound_head(struct page *page, int refs, unsigned int flags)
2116 {
2117 	if (flags & FOLL_PIN) {
2118 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
2119 				    refs);
2120 
2121 		if (hpage_pincount_available(page))
2122 			hpage_pincount_sub(page, refs);
2123 		else
2124 			refs *= GUP_PIN_COUNTING_BIAS;
2125 	}
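	/*
	 * Worked example: a FOLL_PIN release of refs == 2 on a page without
	 * the separate hpage pincount arrives here with refs scaled to
	 * 2 * GUP_PIN_COUNTING_BIAS == 2048, exactly undoing what
	 * try_grab_compound_head() added.
	 */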
2126 
2127 	VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
2128 	/*
2129 	 * Calling put_page() for each ref is unnecessarily slow. Only the last
2130 	 * ref needs a put_page().
2131 	 */
2132 	if (refs > 1)
2133 		page_ref_sub(page, refs - 1);
2134 	put_page(page);
2135 }
2136 
2137 #ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
2138 
2139 /*
2140  * WARNING: only to be used in the get_user_pages_fast() implementation.
2141  *
2142  * With get_user_pages_fast(), we walk down the pagetables without taking any
2143  * locks.  For this we would like to load the pointers atomically, but sometimes
2144  * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE).  What
2145  * we do have is the guarantee that a PTE will only either go from not present
2146  * to present, or present to not present or both -- it will not switch to a
2147  * completely different present page without a TLB flush in between; something
2148  * that we are blocking by holding interrupts off.
2149  *
2150  * Setting ptes from not present to present goes:
2151  *
2152  *   ptep->pte_high = h;
2153  *   smp_wmb();
2154  *   ptep->pte_low = l;
2155  *
2156  * And present to not present goes:
2157  *
2158  *   ptep->pte_low = 0;
2159  *   smp_wmb();
2160  *   ptep->pte_high = 0;
2161  *
2162  * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
2163  * We load pte_high *after* loading pte_low, which ensures we don't see an older
2164  * value of pte_high.  *Then* we recheck pte_low, which ensures that we haven't
2165  * picked up a changed pte high. We might have gotten rubbish values from
2166  * pte_low and pte_high, but we are guaranteed that pte_low will not have the
2167  * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
2168  * operates on present ptes we're safe.
2169  */
2170 static inline pte_t gup_get_pte(pte_t *ptep)
2171 {
2172 	pte_t pte;
2173 
2174 	do {
2175 		pte.pte_low = ptep->pte_low;
2176 		smp_rmb();
2177 		pte.pte_high = ptep->pte_high;
2178 		smp_rmb();
2179 	} while (unlikely(pte.pte_low != ptep->pte_low));
2180 
2181 	return pte;
2182 }
2183 #else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
2184 /*
2185  * We require that the PTE can be read atomically.
2186  */
2187 static inline pte_t gup_get_pte(pte_t *ptep)
2188 {
2189 	return READ_ONCE(*ptep);
2190 }
2191 #endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
2192 
2193 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2194 					    unsigned int flags,
2195 					    struct page **pages)
2196 {
2197 	while ((*nr) - nr_start) {
2198 		struct page *page = pages[--(*nr)];
2199 
2200 		ClearPageReferenced(page);
2201 		if (flags & FOLL_PIN)
2202 			unpin_user_page(page);
2203 		else
2204 			put_page(page);
2205 	}
2206 }
2207 
2208 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2209 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2210 			 unsigned int flags, struct page **pages, int *nr)
2211 {
2212 	struct dev_pagemap *pgmap = NULL;
2213 	int nr_start = *nr, ret = 0;
2214 	pte_t *ptep, *ptem;
2215 
2216 	ptem = ptep = pte_offset_map(&pmd, addr);
2217 	do {
2218 		pte_t pte = gup_get_pte(ptep);
2219 		struct page *head, *page;
2220 
2221 		/*
2222 		 * Similar to the PMD case below, NUMA hinting must take slow
2223 		 * path using the pte_protnone check.
2224 		 */
2225 		if (pte_protnone(pte))
2226 			goto pte_unmap;
2227 
2228 		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2229 			goto pte_unmap;
2230 
2231 		if (pte_devmap(pte)) {
2232 			if (unlikely(flags & FOLL_LONGTERM))
2233 				goto pte_unmap;
2234 
2235 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2236 			if (unlikely(!pgmap)) {
2237 				undo_dev_pagemap(nr, nr_start, flags, pages);
2238 				goto pte_unmap;
2239 			}
2240 		} else if (pte_special(pte))
2241 			goto pte_unmap;
2242 
2243 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2244 		page = pte_page(pte);
2245 
2246 		head = try_grab_compound_head(page, 1, flags);
2247 		if (!head)
2248 			goto pte_unmap;
2249 
2250 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2251 			put_compound_head(head, 1, flags);
2252 			goto pte_unmap;
2253 		}
2254 
2255 		VM_BUG_ON_PAGE(compound_head(page) != head, page);
2256 
2257 		/*
2258 		 * We need to make the page accessible if and only if we are
2259 		 * going to access its content (the FOLL_PIN case).  Please
2260 		 * see Documentation/core-api/pin_user_pages.rst for
2261 		 * details.
2262 		 */
2263 		if (flags & FOLL_PIN) {
2264 			ret = arch_make_page_accessible(page);
2265 			if (ret) {
2266 				unpin_user_page(page);
2267 				goto pte_unmap;
2268 			}
2269 		}
2270 		SetPageReferenced(page);
2271 		pages[*nr] = page;
2272 		(*nr)++;
2273 
2274 	} while (ptep++, addr += PAGE_SIZE, addr != end);
2275 
2276 	ret = 1;
2277 
2278 pte_unmap:
2279 	if (pgmap)
2280 		put_dev_pagemap(pgmap);
2281 	pte_unmap(ptem);
2282 	return ret;
2283 }
2284 #else
2285 
2286 /*
2287  * If we can't determine whether or not a pte is special, then fail immediately
2288  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2289  * to be special.
2290  *
2291  * For a futex to be placed on a THP tail page, get_futex_key requires a
2292  * __get_user_pages_fast implementation that can pin pages. Thus it's still
2293  * useful to have gup_huge_pmd even if we can't operate on ptes.
2294  */
2295 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2296 			 unsigned int flags, struct page **pages, int *nr)
2297 {
2298 	return 0;
2299 }
2300 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2301 
2302 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2303 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2304 			     unsigned long end, unsigned int flags,
2305 			     struct page **pages, int *nr)
2306 {
2307 	int nr_start = *nr;
2308 	struct dev_pagemap *pgmap = NULL;
2309 
2310 	do {
2311 		struct page *page = pfn_to_page(pfn);
2312 
2313 		pgmap = get_dev_pagemap(pfn, pgmap);
2314 		if (unlikely(!pgmap)) {
2315 			undo_dev_pagemap(nr, nr_start, flags, pages);
2316 			return 0;
2317 		}
2318 		SetPageReferenced(page);
2319 		pages[*nr] = page;
2320 		if (unlikely(!try_grab_page(page, flags))) {
2321 			undo_dev_pagemap(nr, nr_start, flags, pages);
2322 			return 0;
2323 		}
2324 		(*nr)++;
2325 		pfn++;
2326 	} while (addr += PAGE_SIZE, addr != end);
2327 
2328 	if (pgmap)
2329 		put_dev_pagemap(pgmap);
2330 	return 1;
2331 }
2332 
2333 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2334 				 unsigned long end, unsigned int flags,
2335 				 struct page **pages, int *nr)
2336 {
2337 	unsigned long fault_pfn;
2338 	int nr_start = *nr;
2339 
2340 	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2341 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2342 		return 0;
2343 
2344 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2345 		undo_dev_pagemap(nr, nr_start, flags, pages);
2346 		return 0;
2347 	}
2348 	return 1;
2349 }
2350 
2351 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2352 				 unsigned long end, unsigned int flags,
2353 				 struct page **pages, int *nr)
2354 {
2355 	unsigned long fault_pfn;
2356 	int nr_start = *nr;
2357 
2358 	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2359 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2360 		return 0;
2361 
2362 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2363 		undo_dev_pagemap(nr, nr_start, flags, pages);
2364 		return 0;
2365 	}
2366 	return 1;
2367 }
2368 #else
2369 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2370 				 unsigned long end, unsigned int flags,
2371 				 struct page **pages, int *nr)
2372 {
2373 	BUILD_BUG();
2374 	return 0;
2375 }
2376 
2377 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2378 				 unsigned long end, unsigned int flags,
2379 				 struct page **pages, int *nr)
2380 {
2381 	BUILD_BUG();
2382 	return 0;
2383 }
2384 #endif
2385 
2386 static int record_subpages(struct page *page, unsigned long addr,
2387 			   unsigned long end, struct page **pages)
2388 {
2389 	int nr;
2390 
2391 	for (nr = 0; addr != end; addr += PAGE_SIZE)
2392 		pages[nr++] = page++;
2393 
2394 	return nr;
2395 }
2396 
2397 #ifdef CONFIG_ARCH_HAS_HUGEPD
2398 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2399 				      unsigned long sz)
2400 {
2401 	unsigned long __boundary = (addr + sz) & ~(sz-1);
2402 	return (__boundary - 1 < end - 1) ? __boundary : end;
2403 }
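
/*
 * Worked example for hugepte_addr_end() above: with sz = 1UL << 21 (a
 * 2 MiB huge pte) and addr = 0x201000, __boundary is
 * (0x201000 + 0x200000) & ~0x1fffff = 0x400000, i.e. the end of the huge
 * pte containing addr, clamped to 'end'.
 */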
2404 
2405 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2406 		       unsigned long end, unsigned int flags,
2407 		       struct page **pages, int *nr)
2408 {
2409 	unsigned long pte_end;
2410 	struct page *head, *page;
2411 	pte_t pte;
2412 	int refs;
2413 
2414 	pte_end = (addr + sz) & ~(sz-1);
2415 	if (pte_end < end)
2416 		end = pte_end;
2417 
2418 	pte = READ_ONCE(*ptep);
2419 
2420 	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2421 		return 0;
2422 
2423 	/* hugepages are never "special" */
2424 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2425 
2426 	head = pte_page(pte);
2427 	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
2428 	refs = record_subpages(page, addr, end, pages + *nr);
2429 
2430 	head = try_grab_compound_head(head, refs, flags);
2431 	if (!head)
2432 		return 0;
2433 
2434 	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2435 		put_compound_head(head, refs, flags);
2436 		return 0;
2437 	}
2438 
2439 	*nr += refs;
2440 	SetPageReferenced(head);
2441 	return 1;
2442 }
2443 
2444 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2445 		unsigned int pdshift, unsigned long end, unsigned int flags,
2446 		struct page **pages, int *nr)
2447 {
2448 	pte_t *ptep;
2449 	unsigned long sz = 1UL << hugepd_shift(hugepd);
2450 	unsigned long next;
2451 
2452 	ptep = hugepte_offset(hugepd, addr, pdshift);
2453 	do {
2454 		next = hugepte_addr_end(addr, end, sz);
2455 		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2456 			return 0;
2457 	} while (ptep++, addr = next, addr != end);
2458 
2459 	return 1;
2460 }
2461 #else
2462 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2463 		unsigned int pdshift, unsigned long end, unsigned int flags,
2464 		struct page **pages, int *nr)
2465 {
2466 	return 0;
2467 }
2468 #endif /* CONFIG_ARCH_HAS_HUGEPD */
2469 
2470 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2471 			unsigned long end, unsigned int flags,
2472 			struct page **pages, int *nr)
2473 {
2474 	struct page *head, *page;
2475 	int refs;
2476 
2477 	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2478 		return 0;
2479 
2480 	if (pmd_devmap(orig)) {
2481 		if (unlikely(flags & FOLL_LONGTERM))
2482 			return 0;
2483 		return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2484 					     pages, nr);
2485 	}
2486 
2487 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2488 	refs = record_subpages(page, addr, end, pages + *nr);
2489 
2490 	head = try_grab_compound_head(pmd_page(orig), refs, flags);
2491 	if (!head)
2492 		return 0;
2493 
2494 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2495 		put_compound_head(head, refs, flags);
2496 		return 0;
2497 	}
2498 
2499 	*nr += refs;
2500 	SetPageReferenced(head);
2501 	return 1;
2502 }
2503 
2504 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2505 			unsigned long end, unsigned int flags,
2506 			struct page **pages, int *nr)
2507 {
2508 	struct page *head, *page;
2509 	int refs;
2510 
2511 	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2512 		return 0;
2513 
2514 	if (pud_devmap(orig)) {
2515 		if (unlikely(flags & FOLL_LONGTERM))
2516 			return 0;
2517 		return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2518 					     pages, nr);
2519 	}
2520 
2521 	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2522 	refs = record_subpages(page, addr, end, pages + *nr);
2523 
2524 	head = try_grab_compound_head(pud_page(orig), refs, flags);
2525 	if (!head)
2526 		return 0;
2527 
2528 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2529 		put_compound_head(head, refs, flags);
2530 		return 0;
2531 	}
2532 
2533 	*nr += refs;
2534 	SetPageReferenced(head);
2535 	return 1;
2536 }
2537 
2538 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2539 			unsigned long end, unsigned int flags,
2540 			struct page **pages, int *nr)
2541 {
2542 	int refs;
2543 	struct page *head, *page;
2544 
2545 	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2546 		return 0;
2547 
2548 	BUILD_BUG_ON(pgd_devmap(orig));
2549 
2550 	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2551 	refs = record_subpages(page, addr, end, pages + *nr);
2552 
2553 	head = try_grab_compound_head(pgd_page(orig), refs, flags);
2554 	if (!head)
2555 		return 0;
2556 
2557 	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2558 		put_compound_head(head, refs, flags);
2559 		return 0;
2560 	}
2561 
2562 	*nr += refs;
2563 	SetPageReferenced(head);
2564 	return 1;
2565 }
2566 
2567 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
2568 		unsigned int flags, struct page **pages, int *nr)
2569 {
2570 	unsigned long next;
2571 	pmd_t *pmdp;
2572 
2573 	pmdp = pmd_offset(&pud, addr);
2574 	do {
2575 		pmd_t pmd = READ_ONCE(*pmdp);
2576 
2577 		next = pmd_addr_end(addr, end);
2578 		if (!pmd_present(pmd))
2579 			return 0;
2580 
2581 		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2582 			     pmd_devmap(pmd))) {
2583 			/*
2584 			 * NUMA hinting faults need to be handled in the GUP
2585 			 * slowpath for accounting purposes and so that they
2586 			 * can be serialised against THP migration.
2587 			 */
2588 			if (pmd_protnone(pmd))
2589 				return 0;
2590 
2591 			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2592 				pages, nr))
2593 				return 0;
2594 
2595 		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2596 			/*
2597 			 * Architectures can use different formats for the
2598 			 * hugetlbfs pmd and the THP pmd.
2599 			 */
2600 			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2601 					 PMD_SHIFT, next, flags, pages, nr))
2602 				return 0;
2603 		} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2604 			return 0;
2605 	} while (pmdp++, addr = next, addr != end);
2606 
2607 	return 1;
2608 }
2609 
2610 static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
2611 			 unsigned int flags, struct page **pages, int *nr)
2612 {
2613 	unsigned long next;
2614 	pud_t *pudp;
2615 
2616 	pudp = pud_offset(&p4d, addr);
2617 	do {
2618 		pud_t pud = READ_ONCE(*pudp);
2619 
2620 		next = pud_addr_end(addr, end);
2621 		if (unlikely(!pud_present(pud)))
2622 			return 0;
2623 		if (unlikely(pud_huge(pud))) {
2624 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
2625 					  pages, nr))
2626 				return 0;
2627 		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2628 			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2629 					 PUD_SHIFT, next, flags, pages, nr))
2630 				return 0;
2631 		} else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
2632 			return 0;
2633 	} while (pudp++, addr = next, addr != end);
2634 
2635 	return 1;
2636 }
2637 
2638 static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
2639 			 unsigned int flags, struct page **pages, int *nr)
2640 {
2641 	unsigned long next;
2642 	p4d_t *p4dp;
2643 
2644 	p4dp = p4d_offset(&pgd, addr);
2645 	do {
2646 		p4d_t p4d = READ_ONCE(*p4dp);
2647 
2648 		next = p4d_addr_end(addr, end);
2649 		if (p4d_none(p4d))
2650 			return 0;
2651 		BUILD_BUG_ON(p4d_huge(p4d));
2652 		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2653 			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2654 					 P4D_SHIFT, next, flags, pages, nr))
2655 				return 0;
2656 		} else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
2657 			return 0;
2658 	} while (p4dp++, addr = next, addr != end);
2659 
2660 	return 1;
2661 }
2662 
2663 static void gup_pgd_range(unsigned long addr, unsigned long end,
2664 		unsigned int flags, struct page **pages, int *nr)
2665 {
2666 	unsigned long next;
2667 	pgd_t *pgdp;
2668 
2669 	pgdp = pgd_offset(current->mm, addr);
2670 	do {
2671 		pgd_t pgd = READ_ONCE(*pgdp);
2672 
2673 		next = pgd_addr_end(addr, end);
2674 		if (pgd_none(pgd))
2675 			return;
2676 		if (unlikely(pgd_huge(pgd))) {
2677 			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2678 					  pages, nr))
2679 				return;
2680 		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2681 			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2682 					 PGDIR_SHIFT, next, flags, pages, nr))
2683 				return;
2684 		} else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
2685 			return;
2686 	} while (pgdp++, addr = next, addr != end);
2687 }
2688 #else
2689 static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2690 		unsigned int flags, struct page **pages, int *nr)
2691 {
2692 }
2693 #endif /* CONFIG_HAVE_FAST_GUP */
2694 
2695 #ifndef gup_fast_permitted
2696 /*
2697  * Check if it's allowed to use __get_user_pages_fast() for the range, or
2698  * whether we need to fall back to the slow version:
2699  */
2700 static bool gup_fast_permitted(unsigned long start, unsigned long end)
2701 {
2702 	return true;
2703 }
2704 #endif
2705 
2706 /*
2707  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2708  * the regular GUP.
2709  * Note a difference from get_user_pages_fast: this always returns the
2710  * number of pages pinned, 0 if no pages were pinned.
2711  *
2712  * If the architecture does not support this function, simply return with no
2713  * pages pinned.
2714  *
2715  * Careful, careful! COW breaking can go either way, so a non-write
2716  * access can get ambiguous page results. If you call this function without
2717  * 'write' set, you'd better be sure that you're ok with that ambiguity.
2718  */
2719 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
2720 			  struct page **pages)
2721 {
2722 	unsigned long len, end;
2723 	unsigned long flags;
2724 	int nr_pinned = 0;
2725 	/*
2726 	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2727 	 * because gup fast is always a "pin with a +1 page refcount" request.
2728 	 */
2729 	unsigned int gup_flags = FOLL_GET;
2730 
2731 	if (write)
2732 		gup_flags |= FOLL_WRITE;
2733 
2734 	start = untagged_addr(start) & PAGE_MASK;
2735 	len = (unsigned long) nr_pages << PAGE_SHIFT;
2736 	end = start + len;
2737 
2738 	if (end <= start)
2739 		return 0;
2740 	if (unlikely(!access_ok((void __user *)start, len)))
2741 		return 0;
2742 
2743 	/*
2744 	 * Disable interrupts.  We use the nested form as we can already have
2745 	 * interrupts disabled by get_futex_key.
2746 	 *
2747 	 * With interrupts disabled, we block page table pages from being
2748 	 * freed from under us. See struct mmu_table_batch comments in
2749 	 * include/asm-generic/tlb.h for more details.
2750 	 *
2751 	 * We do not adopt an rcu_read_lock(.) here as we also want to
2752 	 * block IPIs that come from THPs splitting.
2753 	 *
2754 	 * NOTE! We allow read-only gup_fast() here, but you'd better be
2755 	 * careful about possible COW pages. You'll get _a_ COW page, but
2756 	 * not necessarily the one you intended to get depending on what
2757 	 * COW event happens after this. COW may break the page copy in a
2758 	 * random direction.
2759 	 */
2760 
2761 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
2762 	    gup_fast_permitted(start, end)) {
2763 		local_irq_save(flags);
2764 		gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2765 		local_irq_restore(flags);
2766 	}
2767 
2768 	return nr_pinned;
2769 }
2770 EXPORT_SYMBOL_GPL(__get_user_pages_fast);
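
/*
 * Example: an illustrative sketch (hypothetical helper). Because
 * __get_user_pages_fast() neither sleeps nor takes mmap_sem, it may be
 * called with interrupts already disabled, as the futex code does; a
 * return of 0 just means "fall back to a slower path". Any page pinned
 * this way must still be released with put_page().
 */
static __maybe_unused int gup_fast_atomic_example(unsigned long uaddr,
						  struct page **page)
{
	/* Read-only access: beware the COW ambiguity described above. */
	return __get_user_pages_fast(uaddr, 1, 0, page);
}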
2771 
2772 static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2773 				   unsigned int gup_flags, struct page **pages)
2774 {
2775 	int ret;
2776 
2777 	/*
2778 	 * FIXME: FOLL_LONGTERM does not work with
2779 	 * get_user_pages_unlocked() (see comments in that function)
2780 	 */
2781 	if (gup_flags & FOLL_LONGTERM) {
2782 		down_read(&current->mm->mmap_sem);
2783 		ret = __gup_longterm_locked(current, current->mm,
2784 					    start, nr_pages,
2785 					    pages, NULL, gup_flags);
2786 		up_read(&current->mm->mmap_sem);
2787 	} else {
2788 		ret = get_user_pages_unlocked(start, nr_pages,
2789 					      pages, gup_flags);
2790 	}
2791 
2792 	return ret;
2793 }
2794 
2795 static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
2796 					unsigned int gup_flags,
2797 					struct page **pages)
2798 {
2799 	unsigned long addr, len, end;
2800 	int nr_pinned = 0, ret = 0;
2801 
2802 	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2803 				       FOLL_FORCE | FOLL_PIN | FOLL_GET)))
2804 		return -EINVAL;
2805 
2806 	start = untagged_addr(start) & PAGE_MASK;
2807 	addr = start;
2808 	len = (unsigned long) nr_pages << PAGE_SHIFT;
2809 	end = start + len;
2810 
2811 	if (end <= start)
2812 		return 0;
2813 	if (unlikely(!access_ok((void __user *)start, len)))
2814 		return -EFAULT;
2815 
2816 	/*
2817 	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
2818 	 * because get_user_pages() may need to cause an early COW in
2819 	 * order to avoid confusing the normal COW routines. So only
2820 	 * targets that are already writable are safe to do by just
2821 	 * looking at the page tables.
2822 	 */
2823 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
2824 	    gup_fast_permitted(start, end)) {
2825 		local_irq_disable();
2826 		gup_pgd_range(addr, end, gup_flags | FOLL_WRITE, pages, &nr_pinned);
2827 		local_irq_enable();
2828 		ret = nr_pinned;
2829 	}
2830 
2831 	if (nr_pinned < nr_pages) {
2832 		/* Try to get the remaining pages with get_user_pages */
2833 		start += nr_pinned << PAGE_SHIFT;
2834 		pages += nr_pinned;
2835 
2836 		ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned,
2837 					      gup_flags, pages);
2838 
2839 		/* Have to be a bit careful with return values */
2840 		if (nr_pinned > 0) {
2841 			if (ret < 0)
2842 				ret = nr_pinned;
2843 			else
2844 				ret += nr_pinned;
2845 		}
2846 	}
2847 
2848 	return ret;
2849 }
2850 
2851 /**
2852  * get_user_pages_fast() - pin user pages in memory
2853  * @start:      starting user address
2854  * @nr_pages:   number of pages from start to pin
2855  * @gup_flags:  flags modifying pin behaviour
2856  * @pages:      array that receives pointers to the pages pinned.
2857  *              Should be at least nr_pages long.
2858  *
2859  * Attempt to pin user pages in memory without taking mm->mmap_sem.
2860  * If not successful, it will fall back to taking the lock and
2861  * calling get_user_pages().
2862  *
2863  * Returns number of pages pinned. This may be fewer than the number requested.
2864  * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2865  * -errno.
2866  */
2867 int get_user_pages_fast(unsigned long start, int nr_pages,
2868 			unsigned int gup_flags, struct page **pages)
2869 {
2870 	/*
2871 	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2872 	 * never directly by the caller, so enforce that:
2873 	 */
2874 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2875 		return -EINVAL;
2876 
2877 	/*
2878 	 * The caller may or may not have explicitly set FOLL_GET; either way is
2879 	 * OK. However, internally (within mm/gup.c), gup fast variants must set
2880 	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2881 	 * request.
2882 	 */
2883 	gup_flags |= FOLL_GET;
2884 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2885 }
2886 EXPORT_SYMBOL_GPL(get_user_pages_fast);
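
/*
 * Example: an illustrative sketch (hypothetical helper) of coping with a
 * short pin. No mmap_sem is taken here; the slow-path fallback inside
 * internal_get_user_pages_fast() does its own locking.
 */
static __maybe_unused int gup_fast_all_or_nothing(unsigned long uaddr,
						  int nr_pages,
						  struct page **pages)
{
	int i, got;

	got = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (got < 0)
		return got;
	if (got != nr_pages) {
		/* Short pin: release what was pinned and report a fault. */
		for (i = 0; i < got; i++)
			put_page(pages[i]);
		return -EFAULT;
	}
	return 0;
}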
2887 
2888 /**
2889  * pin_user_pages_fast() - pin user pages in memory without taking locks
2890  *
2891  * @start:      starting user address
2892  * @nr_pages:   number of pages from start to pin
2893  * @gup_flags:  flags modifying pin behaviour
2894  * @pages:      array that receives pointers to the pages pinned.
2895  *              Should be at least nr_pages long.
2896  *
2897  * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2898  * get_user_pages_fast() for documentation on the function arguments, because
2899  * the arguments here are identical.
2900  *
2901  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2902  * see Documentation/core-api/pin_user_pages.rst for further details.
2903  *
2904  * This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
2905  * is NOT intended for Case 2 (RDMA: long-term pins).
2906  */
2907 int pin_user_pages_fast(unsigned long start, int nr_pages,
2908 			unsigned int gup_flags, struct page **pages)
2909 {
2910 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2911 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2912 		return -EINVAL;
2913 
2914 	gup_flags |= FOLL_PIN;
2915 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2916 }
2917 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
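
/*
 * Example: an illustrative sketch (hypothetical helper) of the Case 1
 * (DIO) lifecycle: pin with FOLL_PIN, let the device write, then release
 * via the unpin family (here, unpin_user_pages_dirty_lock()) instead of
 * set_page_dirty_lock() + put_page().
 */
static __maybe_unused int dio_pin_example(unsigned long uaddr, int nr_pages,
					  struct page **pages)
{
	int got;

	got = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (got < 0)
		return got;

	/* ... submit the pages for DMA and wait for completion ... */

	unpin_user_pages_dirty_lock(pages, got, true);
	return got == nr_pages ? 0 : -EFAULT;
}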
2918 
2919 /**
2920  * pin_user_pages_remote() - pin pages of a remote process (task != current)
2921  *
2922  * @tsk:	the task_struct to use for page fault accounting, or
2923  *		NULL if faults are not to be recorded.
2924  * @mm:		mm_struct of target mm
2925  * @start:	starting user address
2926  * @nr_pages:	number of pages from start to pin
2927  * @gup_flags:	flags modifying lookup behaviour
2928  * @pages:	array that receives pointers to the pages pinned.
2929  *		Should be at least nr_pages long. Or NULL, if caller
2930  *		only intends to ensure the pages are faulted in.
2931  * @vmas:	array of pointers to vmas corresponding to each page.
2932  *		Or NULL if the caller does not require them.
2933  * @locked:	pointer to lock flag indicating whether lock is held and
2934  *		subsequently whether VM_FAULT_RETRY functionality can be
2935  *		utilised. Lock must initially be held.
2936  *
2937  * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2938  * get_user_pages_remote() for documentation on the function arguments, because
2939  * the arguments here are identical.
2940  *
2941  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2942  * see Documentation/core-api/pin_user_pages.rst for details.
2943  *
2944  * This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
2945  * is NOT intended for Case 2 (RDMA: long-term pins).
2946  */
2947 long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
2948 			   unsigned long start, unsigned long nr_pages,
2949 			   unsigned int gup_flags, struct page **pages,
2950 			   struct vm_area_struct **vmas, int *locked)
2951 {
2952 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2953 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2954 		return -EINVAL;
2955 
2956 	gup_flags |= FOLL_PIN;
2957 	return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
2958 				       pages, vmas, locked);
2959 }
2960 EXPORT_SYMBOL(pin_user_pages_remote);
2961 
2962 /**
2963  * pin_user_pages() - pin user pages in memory for use by other devices
2964  *
2965  * @start:	starting user address
2966  * @nr_pages:	number of pages from start to pin
2967  * @gup_flags:	flags modifying lookup behaviour
2968  * @pages:	array that receives pointers to the pages pinned.
2969  *		Should be at least nr_pages long. Or NULL, if caller
2970  *		only intends to ensure the pages are faulted in.
2971  * @vmas:	array of pointers to vmas corresponding to each page.
2972  *		Or NULL if the caller does not require them.
2973  *
2974  * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
2975  * FOLL_PIN is set.
2976  *
2977  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2978  * see Documentation/core-api/pin_user_pages.rst for details.
2979  *
2980  * This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
2981  * is NOT intended for Case 2 (RDMA: long-term pins).
2982  */
2983 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2984 		    unsigned int gup_flags, struct page **pages,
2985 		    struct vm_area_struct **vmas)
2986 {
2987 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2988 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2989 		return -EINVAL;
2990 
2991 	gup_flags |= FOLL_PIN;
2992 	return __gup_longterm_locked(current, current->mm, start, nr_pages,
2993 				     pages, vmas, gup_flags);
2994 }
2995 EXPORT_SYMBOL(pin_user_pages);
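
/*
 * Example: an illustrative sketch (hypothetical helper). The calling
 * convention matches get_user_pages(), but the release side must use the
 * unpin family so the FOLL_PIN accounting stays balanced.
 */
static __maybe_unused long pin_pages_example(unsigned long uaddr,
					     struct page **page)
{
	long got;

	down_read(&current->mm->mmap_sem);
	got = pin_user_pages(uaddr, 1, FOLL_WRITE, page, NULL);
	up_read(&current->mm->mmap_sem);

	if (got == 1)
		unpin_user_page(*page);
	return got;
}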
2996 
2997 /*
2998  * pin_user_pages_unlocked() is the FOLL_PIN variant of
2999  * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3000  * FOLL_PIN and rejects FOLL_GET.
3001  */
3002 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3003 			     struct page **pages, unsigned int gup_flags)
3004 {
3005 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
3006 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3007 		return -EINVAL;
3008 
3009 	gup_flags |= FOLL_PIN;
3010 	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3011 }
3012 EXPORT_SYMBOL(pin_user_pages_unlocked);
3013