1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/err.h>
5 #include <linux/spinlock.h>
6
7 #include <linux/mm.h>
8 #include <linux/memfd.h>
9 #include <linux/memremap.h>
10 #include <linux/pagemap.h>
11 #include <linux/rmap.h>
12 #include <linux/swap.h>
13 #include <linux/swapops.h>
14 #include <linux/secretmem.h>
15
16 #include <linux/sched/signal.h>
17 #include <linux/rwsem.h>
18 #include <linux/hugetlb.h>
19 #include <linux/migrate.h>
20 #include <linux/mm_inline.h>
21 #include <linux/pagevec.h>
22 #include <linux/sched/mm.h>
23 #include <linux/shmem_fs.h>
24
25 #include <asm/mmu_context.h>
26 #include <asm/tlbflush.h>
27
28 #include "internal.h"
29
30 struct follow_page_context {
31 struct dev_pagemap *pgmap;
32 unsigned int page_mask;
33 };
34
35 static inline void sanity_check_pinned_pages(struct page **pages,
36 unsigned long npages)
37 {
38 if (!IS_ENABLED(CONFIG_DEBUG_VM))
39 return;
40
41 /*
42 * We only pin anonymous pages if they are exclusive. Once pinned, we
43 * can no longer turn them possibly shared and PageAnonExclusive() will
44 * stick around until the page is freed.
45 *
46 * We'd like to verify that our pinned anonymous pages are still mapped
47 * exclusively. The issue with anon THP is that we don't know how
48 * they are/were mapped when pinning them. However, for anon
49 * THP we can assume that either the given page (PTE-mapped THP) or
50 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
51 * neither is the case, there is certainly something wrong.
52 */
53 for (; npages; npages--, pages++) {
54 struct page *page = *pages;
55 struct folio *folio;
56
57 if (!page)
58 continue;
59
60 folio = page_folio(page);
61
62 if (is_zero_page(page) ||
63 !folio_test_anon(folio))
64 continue;
65 if (!folio_test_large(folio) || folio_test_hugetlb(folio))
66 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
67 else
68 /* Either a PTE-mapped or a PMD-mapped THP. */
69 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
70 !PageAnonExclusive(page), page);
71 }
72 }
73
74 /*
75 * Return the folio with ref appropriately incremented,
76 * or NULL if that failed.
77 */
78 static inline struct folio *try_get_folio(struct page *page, int refs)
79 {
80 struct folio *folio;
81
82 retry:
83 folio = page_folio(page);
84 if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
85 return NULL;
86 if (unlikely(!folio_ref_try_add(folio, refs)))
87 return NULL;
88
89 /*
90 * At this point we have a stable reference to the folio; but it
91 * could be that between calling page_folio() and the refcount
92 * increment, the folio was split, in which case we'd end up
93 * holding a reference on a folio that has nothing to do with the page
94 * we were given anymore.
95 * So now that the folio is stable, recheck that the page still
96 * belongs to this folio.
97 */
98 if (unlikely(page_folio(page) != folio)) {
99 if (!put_devmap_managed_folio_refs(folio, refs))
100 folio_put_refs(folio, refs);
101 goto retry;
102 }
103
104 return folio;
105 }
106
107 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
108 {
109 if (flags & FOLL_PIN) {
110 if (is_zero_folio(folio))
111 return;
112 node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
113 if (folio_test_large(folio))
114 atomic_sub(refs, &folio->_pincount);
115 else
116 refs *= GUP_PIN_COUNTING_BIAS;
117 }
118
119 if (!put_devmap_managed_folio_refs(folio, refs))
120 folio_put_refs(folio, refs);
121 }
122
123 /**
124 * try_grab_folio() - add a folio's refcount by a flag-dependent amount
125 * @folio: pointer to folio to be grabbed
126 * @refs: the value to (effectively) add to the folio's refcount
127 * @flags: gup flags: these are the FOLL_* flag values
128 *
129 * This might not do anything at all, depending on the flags argument.
130 *
131 * "grab" names in this file mean, "look at flags to decide whether to use
132 * FOLL_PIN or FOLL_GET behavior", when incrementing the folio's refcount.
133 *
134 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
135 * time.
136 *
137 * Return: 0 for success, or if no action was required (if neither FOLL_PIN
138 * nor FOLL_GET was set, nothing is done). A negative error code for failure:
139 *
140 * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not
141 * be grabbed.
142 *
143 * This is called when we already have a stable reference on the folio,
144 * typically in the GUP slow path.
145 */
146 int __must_check try_grab_folio(struct folio *folio, int refs,
147 unsigned int flags)
148 {
149 if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
150 return -ENOMEM;
151
152 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
153 return -EREMOTEIO;
154
155 if (flags & FOLL_GET)
156 folio_ref_add(folio, refs);
157 else if (flags & FOLL_PIN) {
158 /*
159 * Don't take a pin on the zero page - it's not going anywhere
160 * and it is used in a *lot* of places.
161 */
162 if (is_zero_folio(folio))
163 return 0;
164
165 /*
166 * Increment the normal page refcount field at least once,
167 * so that the page really is pinned.
168 */
169 if (folio_test_large(folio)) {
170 folio_ref_add(folio, refs);
171 atomic_add(refs, &folio->_pincount);
172 } else {
173 folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
174 }
175
176 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
177 }
178
179 return 0;
180 }
181
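/*
 * Example (illustrative): for an order-0 folio, FOLL_PIN is encoded
 * purely in the refcount via GUP_PIN_COUNTING_BIAS (1024). Assuming a
 * folio whose only other reference is a single refcount of 1:
 *
 *	try_grab_folio(folio, 2, FOLL_PIN);
 *	// refcount == 1 + 2 * 1024; folio_maybe_dma_pinned() is true
 *
 *	gup_put_folio(folio, 2, FOLL_PIN);
 *	// refcount back to 1
 *
 * A large folio would instead gain 2 on both its refcount and its
 * _pincount.
 */
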
182 /**
183 * unpin_user_page() - release a dma-pinned page
184 * @page: pointer to page to be released
185 *
186 * Pages that were pinned via pin_user_pages*() must be released via either
187 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
188 * that such pages can be separately tracked and uniquely handled. In
189 * particular, interactions with RDMA and filesystems need special handling.
190 */
191 void unpin_user_page(struct page *page)
192 {
193 sanity_check_pinned_pages(&page, 1);
194 gup_put_folio(page_folio(page), 1, FOLL_PIN);
195 }
196 EXPORT_SYMBOL(unpin_user_page);
197
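/*
 * Example (illustrative sketch, assuming a caller-defined NR and
 * user_addr): the usual pairing is pin_user_pages*() followed by
 * unpin_user_page() on every returned page once the DMA is done:
 *
 *	struct page *pages[NR];
 *	long i, pinned;
 *
 *	pinned = pin_user_pages_fast(user_addr, NR, FOLL_WRITE, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	// ... hand the pages to the device ...
 *	for (i = 0; i < pinned; i++)
 *		unpin_user_page(pages[i]);
 */
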
198 /**
199 * unpin_folio() - release a dma-pinned folio
200 * @folio: pointer to folio to be released
201 *
202 * Folios that were pinned via memfd_pin_folios() or other similar routines
203 * must be released either using unpin_folio() or unpin_folios().
204 */
205 void unpin_folio(struct folio *folio)
206 {
207 gup_put_folio(folio, 1, FOLL_PIN);
208 }
209 EXPORT_SYMBOL_GPL(unpin_folio);
210
211 /**
212 * folio_add_pin - Try to get an additional pin on a pinned folio
213 * @folio: The folio to be pinned
214 *
215 * Get an additional pin on a folio we already have a pin on. Makes no change
216 * if the folio is a zero_page.
217 */
218 void folio_add_pin(struct folio *folio)
219 {
220 if (is_zero_folio(folio))
221 return;
222
223 /*
224 * Similar to try_grab_folio(): be sure to *also* increment the normal
225 * page refcount field at least once, so that the page really is
226 * pinned.
227 */
228 if (folio_test_large(folio)) {
229 WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
230 folio_ref_inc(folio);
231 atomic_inc(&folio->_pincount);
232 } else {
233 WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
234 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
235 }
236 }
237
238 static inline struct folio *gup_folio_range_next(struct page *start,
239 unsigned long npages, unsigned long i, unsigned int *ntails)
240 {
241 struct page *next = nth_page(start, i);
242 struct folio *folio = page_folio(next);
243 unsigned int nr = 1;
244
245 if (folio_test_large(folio))
246 nr = min_t(unsigned int, npages - i,
247 folio_nr_pages(folio) - folio_page_idx(folio, next));
248
249 *ntails = nr;
250 return folio;
251 }
252
253 static inline struct folio *gup_folio_next(struct page **list,
254 unsigned long npages, unsigned long i, unsigned int *ntails)
255 {
256 struct folio *folio = page_folio(list[i]);
257 unsigned int nr;
258
259 for (nr = i + 1; nr < npages; nr++) {
260 if (page_folio(list[nr]) != folio)
261 break;
262 }
263
264 *ntails = nr - i;
265 return folio;
266 }
267
268 /**
269 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
270 * @pages: array of pages to be maybe marked dirty, and definitely released.
271 * @npages: number of pages in the @pages array.
272 * @make_dirty: whether to mark the pages dirty
273 *
274 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
275 * variants called on that page.
276 *
277 * For each page in the @pages array, make that page (or its head page, if a
278 * compound page) dirty, if @make_dirty is true, and if the page was previously
279 * listed as clean. In any case, releases all pages using unpin_user_page(),
280 * possibly via unpin_user_pages(), for the non-dirty case.
281 *
282 * Please see the unpin_user_page() documentation for details.
283 *
284 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
285 * required, then the caller should a) verify that this is really correct,
286 * because _lock() is usually required, and b) hand code it:
287 * set_page_dirty(), unpin_user_page().
288 *
289 */
290 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
291 bool make_dirty)
292 {
293 unsigned long i;
294 struct folio *folio;
295 unsigned int nr;
296
297 if (!make_dirty) {
298 unpin_user_pages(pages, npages);
299 return;
300 }
301
302 sanity_check_pinned_pages(pages, npages);
303 for (i = 0; i < npages; i += nr) {
304 folio = gup_folio_next(pages, npages, i, &nr);
305 /*
306 * Checking PageDirty at this point may race with
307 * clear_page_dirty_for_io(), but that's OK. Two key
308 * cases:
309 *
310 * 1) This code sees the page as already dirty, so it
311 * skips the call to set_page_dirty(). That could happen
312 * because clear_page_dirty_for_io() called
313 * folio_mkclean(), followed by set_page_dirty().
314 * However, now the page is going to get written back,
315 * which meets the original intention of setting it
316 * dirty, so all is well: clear_page_dirty_for_io() goes
317 * on to call TestClearPageDirty(), and write the page
318 * back.
319 *
320 * 2) This code sees the page as clean, so it calls
321 * set_page_dirty(). The page stays dirty, despite being
322 * written back, so it gets written back again in the
323 * next writeback cycle. This is harmless.
324 */
325 if (!folio_test_dirty(folio)) {
326 folio_lock(folio);
327 folio_mark_dirty(folio);
328 folio_unlock(folio);
329 }
330 gup_put_folio(folio, nr, FOLL_PIN);
331 }
332 }
333 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
334
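/*
 * Example (illustrative): a typical direct-IO style user releases the
 * pinned pages and propagates dirtiness in one call once the device has
 * written into them:
 *
 *	unpin_user_pages_dirty_lock(pages, pinned, data_was_written);
 *
 * which degenerates to unpin_user_pages(pages, pinned) when
 * data_was_written is false.
 */
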
335 /**
336 * unpin_user_page_range_dirty_lock() - release and optionally dirty
337 * gup-pinned page range
338 *
339 * @page: the starting page of a range maybe marked dirty, and definitely released.
340 * @npages: number of consecutive pages to release.
341 * @make_dirty: whether to mark the pages dirty
342 *
343 * "gup-pinned page range" refers to a range of pages that has had one of the
344 * pin_user_pages() variants called on that page.
345 *
346 * For the page ranges defined by [page .. page+npages], make that range (or
347 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
348 * page range was previously listed as clean.
349 *
350 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
351 * required, then the caller should a) verify that this is really correct,
352 * because _lock() is usually required, and b) hand code it:
353 * set_page_dirty(), unpin_user_page().
354 *
355 */
356 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
357 bool make_dirty)
358 {
359 unsigned long i;
360 struct folio *folio;
361 unsigned int nr;
362
363 for (i = 0; i < npages; i += nr) {
364 folio = gup_folio_range_next(page, npages, i, &nr);
365 if (make_dirty && !folio_test_dirty(folio)) {
366 folio_lock(folio);
367 folio_mark_dirty(folio);
368 folio_unlock(folio);
369 }
370 gup_put_folio(folio, nr, FOLL_PIN);
371 }
372 }
373 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
374
375 static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages)
376 {
377 unsigned long i;
378 struct folio *folio;
379 unsigned int nr;
380
381 /*
382 * Don't perform any sanity checks because we might have raced with
383 * fork() and some anonymous pages might now actually be shared --
384 * which is why we're unpinning after all.
385 */
386 for (i = 0; i < npages; i += nr) {
387 folio = gup_folio_next(pages, npages, i, &nr);
388 gup_put_folio(folio, nr, FOLL_PIN);
389 }
390 }
391
392 /**
393 * unpin_user_pages() - release an array of gup-pinned pages.
394 * @pages: array of pages to be released.
395 * @npages: number of pages in the @pages array.
396 *
397 * For each page in the @pages array, release the page using unpin_user_page().
398 *
399 * Please see the unpin_user_page() documentation for details.
400 */
401 void unpin_user_pages(struct page **pages, unsigned long npages)
402 {
403 unsigned long i;
404 struct folio *folio;
405 unsigned int nr;
406
407 /*
408 * If this WARN_ON() fires, then the system *might* be leaking pages (by
409 * leaving them pinned), but probably not. More likely, gup/pup returned
410 * a hard -ERRNO error to the caller, who erroneously passed it here.
411 */
412 if (WARN_ON(IS_ERR_VALUE(npages)))
413 return;
414
415 sanity_check_pinned_pages(pages, npages);
416 for (i = 0; i < npages; i += nr) {
417 if (!pages[i]) {
418 nr = 1;
419 continue;
420 }
421 folio = gup_folio_next(pages, npages, i, &nr);
422 gup_put_folio(folio, nr, FOLL_PIN);
423 }
424 }
425 EXPORT_SYMBOL(unpin_user_pages);
426
427 /**
428 * unpin_user_folio() - release pages of a folio
429 * @folio: pointer to folio to be released
430 * @npages: number of pages of same folio
431 *
432 * Release @npages pin references on the folio.
433 */
434 void unpin_user_folio(struct folio *folio, unsigned long npages)
435 {
436 gup_put_folio(folio, npages, FOLL_PIN);
437 }
438 EXPORT_SYMBOL(unpin_user_folio);
439
440 /**
441 * unpin_folios() - release an array of gup-pinned folios.
442 * @folios: array of folios to be released.
443 * @nfolios: number of folios in the @folios array.
444 *
445 * For each folio in the @folios array, release the folio using gup_put_folio.
446 *
447 * Please see the unpin_folio() documentation for details.
448 */
449 void unpin_folios(struct folio **folios, unsigned long nfolios)
450 {
451 unsigned long i = 0, j;
452
453 /*
454 * If this WARN_ON() fires, then the system *might* be leaking folios
455 * (by leaving them pinned), but probably not. More likely, gup/pup
456 * returned a hard -ERRNO error to the caller, who erroneously passed
457 * it here.
458 */
459 if (WARN_ON(IS_ERR_VALUE(nfolios)))
460 return;
461
462 while (i < nfolios) {
463 for (j = i + 1; j < nfolios; j++)
464 if (folios[i] != folios[j])
465 break;
466
467 if (folios[i])
468 gup_put_folio(folios[i], j - i, FOLL_PIN);
469 i = j;
470 }
471 }
472 EXPORT_SYMBOL_GPL(unpin_folios);
473
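/*
 * Example (illustrative): folios obtained from memfd_pin_folios() are
 * typically stashed in an array and later dropped in one call:
 *
 *	unpin_folios(folios, nr_pinned);
 *
 * Consecutive duplicates of the same folio in the array are collapsed
 * into a single gup_put_folio() call, as above.
 */
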
474 /*
475 * Set the MMF_HAS_PINNED flag if not set yet; once set, it stays for the mm's
476 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
477 * cache bouncing on large SMP machines for concurrent pinned gups.
478 */
479 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
480 {
481 if (!test_bit(MMF_HAS_PINNED, mm_flags))
482 set_bit(MMF_HAS_PINNED, mm_flags);
483 }
484
485 #ifdef CONFIG_MMU
486
487 #ifdef CONFIG_HAVE_GUP_FAST
488 static int record_subpages(struct page *page, unsigned long sz,
489 unsigned long addr, unsigned long end,
490 struct page **pages)
491 {
492 struct page *start_page;
493 int nr;
494
495 start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
496 for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
497 pages[nr] = nth_page(start_page, nr);
498
499 return nr;
500 }
501
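/*
 * Example (illustrative, x86-64 with 4KiB pages): for a PMD leaf,
 * sz == PMD_SIZE (2MiB). With addr == leaf_start + 0x5000 and
 * end == addr + 3 * PAGE_SIZE, start_page is nth_page(page, 5) and
 * record_subpages() fills pages[0..2] with the 6th, 7th and 8th
 * subpages of the leaf, returning 3.
 */
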
502 /**
503 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
504 * @page: pointer to page to be grabbed
505 * @refs: the value to (effectively) add to the folio's refcount
506 * @flags: gup flags: these are the FOLL_* flag values.
507 *
508 * "grab" names in this file mean, "look at flags to decide whether to use
509 * FOLL_PIN or FOLL_GET behavior", when incrementing the folio's refcount.
510 *
511 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
512 * same time. (That's true throughout the get_user_pages*() and
513 * pin_user_pages*() APIs.) Cases:
514 *
515 * FOLL_GET: folio's refcount will be incremented by @refs.
516 *
517 * FOLL_PIN on large folios: folio's refcount will be incremented by
518 * @refs, and its pincount will be incremented by @refs.
519 *
520 * FOLL_PIN on single-page folios: folio's refcount will be incremented by
521 * @refs * GUP_PIN_COUNTING_BIAS.
522 *
523 * Return: The folio containing @page (with refcount appropriately
524 * incremented) for success, or NULL upon failure. If neither FOLL_GET
525 * nor FOLL_PIN was set, that's considered failure, and furthermore,
526 * a likely bug in the caller, so a warning is also emitted.
527 *
528 * It elevates the folio refcount using an add-ref-unless-zero operation and
529 * must only be called from the fast path.
530 */
531 static struct folio *try_grab_folio_fast(struct page *page, int refs,
532 unsigned int flags)
533 {
534 struct folio *folio;
535
536 /* Warn if this is not called from the fast GUP path */
537 VM_WARN_ON_ONCE(!irqs_disabled());
538
539 if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
540 return NULL;
541
542 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
543 return NULL;
544
545 if (flags & FOLL_GET)
546 return try_get_folio(page, refs);
547
548 /* FOLL_PIN is set */
549
550 /*
551 * Don't take a pin on the zero page - it's not going anywhere
552 * and it is used in a *lot* of places.
553 */
554 if (is_zero_page(page))
555 return page_folio(page);
556
557 folio = try_get_folio(page, refs);
558 if (!folio)
559 return NULL;
560
561 /*
562 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
563 * right zone, so fail and let the caller fall back to the slow
564 * path.
565 */
566 if (unlikely((flags & FOLL_LONGTERM) &&
567 !folio_is_longterm_pinnable(folio))) {
568 if (!put_devmap_managed_folio_refs(folio, refs))
569 folio_put_refs(folio, refs);
570 return NULL;
571 }
572
573 /*
574 * When pinning a large folio, use an exact count to track it.
575 *
576 * However, be sure to *also* increment the normal folio
577 * refcount field at least once, so that the folio really
578 * is pinned. That's why the refcount from the earlier
579 * try_get_folio() is left intact.
580 */
581 if (folio_test_large(folio))
582 atomic_add(refs, &folio->_pincount);
583 else
584 folio_ref_add(folio,
585 refs * (GUP_PIN_COUNTING_BIAS - 1));
586 /*
587 * Adjust the pincount before re-checking the PTE for changes.
588 * This is essentially a smp_mb() and is paired with a memory
589 * barrier in folio_try_share_anon_rmap_*().
590 */
591 smp_mb__after_atomic();
592
593 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
594
595 return folio;
596 }
597 #endif /* CONFIG_HAVE_GUP_FAST */
598
599 static struct page *no_page_table(struct vm_area_struct *vma,
600 unsigned int flags, unsigned long address)
601 {
602 if (!(flags & FOLL_DUMP))
603 return NULL;
604
605 /*
606 * When core dumping, we don't want to allocate unnecessary pages or
607 * page tables. Return error instead of NULL to skip handle_mm_fault,
608 * then get_dump_page() will return NULL to leave a hole in the dump.
609 * But we can only make this optimization where a hole would surely
610 * be zero-filled if handle_mm_fault() actually did handle it.
611 */
612 if (is_vm_hugetlb_page(vma)) {
613 struct hstate *h = hstate_vma(vma);
614
615 if (!hugetlbfs_pagecache_present(h, vma, address))
616 return ERR_PTR(-EFAULT);
617 } else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) {
618 return ERR_PTR(-EFAULT);
619 }
620
621 return NULL;
622 }
623
624 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
625 static struct page *follow_huge_pud(struct vm_area_struct *vma,
626 unsigned long addr, pud_t *pudp,
627 int flags, struct follow_page_context *ctx)
628 {
629 struct mm_struct *mm = vma->vm_mm;
630 struct page *page;
631 pud_t pud = *pudp;
632 unsigned long pfn = pud_pfn(pud);
633 int ret;
634
635 assert_spin_locked(pud_lockptr(mm, pudp));
636
637 if ((flags & FOLL_WRITE) && !pud_write(pud))
638 return NULL;
639
640 if (!pud_present(pud))
641 return NULL;
642
643 pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
644
645 if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
646 pud_devmap(pud)) {
647 /*
648 * device mapped pages can only be returned if the caller
649 * will manage the page reference count.
650 *
651 * At least one of FOLL_GET | FOLL_PIN must be set, so
652 * assert that here:
653 */
654 if (!(flags & (FOLL_GET | FOLL_PIN)))
655 return ERR_PTR(-EEXIST);
656
657 if (flags & FOLL_TOUCH)
658 touch_pud(vma, addr, pudp, flags & FOLL_WRITE);
659
660 ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
661 if (!ctx->pgmap)
662 return ERR_PTR(-EFAULT);
663 }
664
665 page = pfn_to_page(pfn);
666
667 if (!pud_devmap(pud) && !pud_write(pud) &&
668 gup_must_unshare(vma, flags, page))
669 return ERR_PTR(-EMLINK);
670
671 ret = try_grab_folio(page_folio(page), 1, flags);
672 if (ret)
673 page = ERR_PTR(ret);
674 else
675 ctx->page_mask = HPAGE_PUD_NR - 1;
676
677 return page;
678 }
679
680 /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
681 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
682 struct vm_area_struct *vma,
683 unsigned int flags)
684 {
685 /* If the pmd is writable, we can write to the page. */
686 if (pmd_write(pmd))
687 return true;
688
689 /* Maybe FOLL_FORCE is set to override it? */
690 if (!(flags & FOLL_FORCE))
691 return false;
692
693 /* But FOLL_FORCE has no effect on shared mappings */
694 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
695 return false;
696
697 /* ... or read-only private ones */
698 if (!(vma->vm_flags & VM_MAYWRITE))
699 return false;
700
701 /* ... or already writable ones that just need to take a write fault */
702 if (vma->vm_flags & VM_WRITE)
703 return false;
704
705 /*
706 * See can_change_pte_writable(): we broke COW and could map the page
707 * writable if we have an exclusive anonymous page ...
708 */
709 if (!page || !PageAnon(page) || !PageAnonExclusive(page))
710 return false;
711
712 /* ... and a write-fault isn't required for other reasons. */
713 if (pmd_needs_soft_dirty_wp(vma, pmd))
714 return false;
715 return !userfaultfd_huge_pmd_wp(vma, pmd);
716 }
717
718 static struct page *follow_huge_pmd(struct vm_area_struct *vma,
719 unsigned long addr, pmd_t *pmd,
720 unsigned int flags,
721 struct follow_page_context *ctx)
722 {
723 struct mm_struct *mm = vma->vm_mm;
724 pmd_t pmdval = *pmd;
725 struct page *page;
726 int ret;
727
728 assert_spin_locked(pmd_lockptr(mm, pmd));
729
730 page = pmd_page(pmdval);
731 if ((flags & FOLL_WRITE) &&
732 !can_follow_write_pmd(pmdval, page, vma, flags))
733 return NULL;
734
735 /* Avoid dumping huge zero page */
736 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
737 return ERR_PTR(-EFAULT);
738
739 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
740 return NULL;
741
742 if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
743 return ERR_PTR(-EMLINK);
744
745 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
746 !PageAnonExclusive(page), page);
747
748 ret = try_grab_folio(page_folio(page), 1, flags);
749 if (ret)
750 return ERR_PTR(ret);
751
752 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
753 if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
754 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
755 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
756
757 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
758 ctx->page_mask = HPAGE_PMD_NR - 1;
759
760 return page;
761 }
762
763 #else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
764 static struct page *follow_huge_pud(struct vm_area_struct *vma,
765 unsigned long addr, pud_t *pudp,
766 int flags, struct follow_page_context *ctx)
767 {
768 return NULL;
769 }
770
771 static struct page *follow_huge_pmd(struct vm_area_struct *vma,
772 unsigned long addr, pmd_t *pmd,
773 unsigned int flags,
774 struct follow_page_context *ctx)
775 {
776 return NULL;
777 }
778 #endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
779
780 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
781 pte_t *pte, unsigned int flags)
782 {
783 if (flags & FOLL_TOUCH) {
784 pte_t orig_entry = ptep_get(pte);
785 pte_t entry = orig_entry;
786
787 if (flags & FOLL_WRITE)
788 entry = pte_mkdirty(entry);
789 entry = pte_mkyoung(entry);
790
791 if (!pte_same(orig_entry, entry)) {
792 set_pte_at(vma->vm_mm, address, pte, entry);
793 update_mmu_cache(vma, address, pte);
794 }
795 }
796
797 /* Proper page table entry exists, but no corresponding struct page */
798 return -EEXIST;
799 }
800
801 /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
802 static inline bool can_follow_write_pte(pte_t pte, struct page *page,
803 struct vm_area_struct *vma,
804 unsigned int flags)
805 {
806 /* If the pte is writable, we can write to the page. */
807 if (pte_write(pte))
808 return true;
809
810 /* Maybe FOLL_FORCE is set to override it? */
811 if (!(flags & FOLL_FORCE))
812 return false;
813
814 /* But FOLL_FORCE has no effect on shared mappings */
815 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
816 return false;
817
818 /* ... or read-only private ones */
819 if (!(vma->vm_flags & VM_MAYWRITE))
820 return false;
821
822 /* ... or already writable ones that just need to take a write fault */
823 if (vma->vm_flags & VM_WRITE)
824 return false;
825
826 /*
827 * See can_change_pte_writable(): we broke COW and could map the page
828 * writable if we have an exclusive anonymous page ...
829 */
830 if (!page || !PageAnon(page) || !PageAnonExclusive(page))
831 return false;
832
833 /* ... and a write-fault isn't required for other reasons. */
834 if (pte_needs_soft_dirty_wp(vma, pte))
835 return false;
836 return !userfaultfd_pte_wp(vma, pte);
837 }
838
839 static struct page *follow_page_pte(struct vm_area_struct *vma,
840 unsigned long address, pmd_t *pmd, unsigned int flags,
841 struct dev_pagemap **pgmap)
842 {
843 struct mm_struct *mm = vma->vm_mm;
844 struct folio *folio;
845 struct page *page;
846 spinlock_t *ptl;
847 pte_t *ptep, pte;
848 int ret;
849
850 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
851 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
852 (FOLL_PIN | FOLL_GET)))
853 return ERR_PTR(-EINVAL);
854
855 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
856 if (!ptep)
857 return no_page_table(vma, flags, address);
858 pte = ptep_get(ptep);
859 if (!pte_present(pte))
860 goto no_page;
861 if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
862 goto no_page;
863
864 page = vm_normal_page(vma, address, pte);
865
866 /*
867 * We only care about anon pages in can_follow_write_pte() and don't
868 * have to worry about pte_devmap() because they are never anon.
869 */
870 if ((flags & FOLL_WRITE) &&
871 !can_follow_write_pte(pte, page, vma, flags)) {
872 page = NULL;
873 goto out;
874 }
875
876 if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
877 /*
878 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
879 * case since they are only valid while holding the pgmap
880 * reference.
881 */
882 *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
883 if (*pgmap)
884 page = pte_page(pte);
885 else
886 goto no_page;
887 } else if (unlikely(!page)) {
888 if (flags & FOLL_DUMP) {
889 /* Avoid special (like zero) pages in core dumps */
890 page = ERR_PTR(-EFAULT);
891 goto out;
892 }
893
894 if (is_zero_pfn(pte_pfn(pte))) {
895 page = pte_page(pte);
896 } else {
897 ret = follow_pfn_pte(vma, address, ptep, flags);
898 page = ERR_PTR(ret);
899 goto out;
900 }
901 }
902 folio = page_folio(page);
903
904 if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
905 page = ERR_PTR(-EMLINK);
906 goto out;
907 }
908
909 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
910 !PageAnonExclusive(page), page);
911
912 /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
913 ret = try_grab_folio(folio, 1, flags);
914 if (unlikely(ret)) {
915 page = ERR_PTR(ret);
916 goto out;
917 }
918
919 /*
920 * We need to make the page accessible if and only if we are going
921 * to access its content (the FOLL_PIN case). Please see
922 * Documentation/core-api/pin_user_pages.rst for details.
923 */
924 if (flags & FOLL_PIN) {
925 ret = arch_make_folio_accessible(folio);
926 if (ret) {
927 unpin_user_page(page);
928 page = ERR_PTR(ret);
929 goto out;
930 }
931 }
932 if (flags & FOLL_TOUCH) {
933 if ((flags & FOLL_WRITE) &&
934 !pte_dirty(pte) && !folio_test_dirty(folio))
935 folio_mark_dirty(folio);
936 /*
937 * pte_mkyoung() would be more correct here, but atomic care
938 * is needed to avoid losing the dirty bit: it is easier to use
939 * folio_mark_accessed().
940 */
941 folio_mark_accessed(folio);
942 }
943 out:
944 pte_unmap_unlock(ptep, ptl);
945 return page;
946 no_page:
947 pte_unmap_unlock(ptep, ptl);
948 if (!pte_none(pte))
949 return NULL;
950 return no_page_table(vma, flags, address);
951 }
952
953 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
954 unsigned long address, pud_t *pudp,
955 unsigned int flags,
956 struct follow_page_context *ctx)
957 {
958 pmd_t *pmd, pmdval;
959 spinlock_t *ptl;
960 struct page *page;
961 struct mm_struct *mm = vma->vm_mm;
962
963 pmd = pmd_offset(pudp, address);
964 pmdval = pmdp_get_lockless(pmd);
965 if (pmd_none(pmdval))
966 return no_page_table(vma, flags, address);
967 if (!pmd_present(pmdval))
968 return no_page_table(vma, flags, address);
969 if (pmd_devmap(pmdval)) {
970 ptl = pmd_lock(mm, pmd);
971 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
972 spin_unlock(ptl);
973 if (page)
974 return page;
975 return no_page_table(vma, flags, address);
976 }
977 if (likely(!pmd_leaf(pmdval)))
978 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
979
980 if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
981 return no_page_table(vma, flags, address);
982
983 ptl = pmd_lock(mm, pmd);
984 pmdval = *pmd;
985 if (unlikely(!pmd_present(pmdval))) {
986 spin_unlock(ptl);
987 return no_page_table(vma, flags, address);
988 }
989 if (unlikely(!pmd_leaf(pmdval))) {
990 spin_unlock(ptl);
991 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
992 }
993 if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
994 spin_unlock(ptl);
995 split_huge_pmd(vma, pmd, address);
996 /* If pmd was left empty, stuff a page table in there quickly */
997 return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
998 follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
999 }
1000 page = follow_huge_pmd(vma, address, pmd, flags, ctx);
1001 spin_unlock(ptl);
1002 return page;
1003 }
1004
1005 static struct page *follow_pud_mask(struct vm_area_struct *vma,
1006 unsigned long address, p4d_t *p4dp,
1007 unsigned int flags,
1008 struct follow_page_context *ctx)
1009 {
1010 pud_t *pudp, pud;
1011 spinlock_t *ptl;
1012 struct page *page;
1013 struct mm_struct *mm = vma->vm_mm;
1014
1015 pudp = pud_offset(p4dp, address);
1016 pud = READ_ONCE(*pudp);
1017 if (!pud_present(pud))
1018 return no_page_table(vma, flags, address);
1019 if (pud_leaf(pud)) {
1020 ptl = pud_lock(mm, pudp);
1021 page = follow_huge_pud(vma, address, pudp, flags, ctx);
1022 spin_unlock(ptl);
1023 if (page)
1024 return page;
1025 return no_page_table(vma, flags, address);
1026 }
1027 if (unlikely(pud_bad(pud)))
1028 return no_page_table(vma, flags, address);
1029
1030 return follow_pmd_mask(vma, address, pudp, flags, ctx);
1031 }
1032
1033 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
1034 unsigned long address, pgd_t *pgdp,
1035 unsigned int flags,
1036 struct follow_page_context *ctx)
1037 {
1038 p4d_t *p4dp, p4d;
1039
1040 p4dp = p4d_offset(pgdp, address);
1041 p4d = READ_ONCE(*p4dp);
1042 BUILD_BUG_ON(p4d_leaf(p4d));
1043
1044 if (!p4d_present(p4d) || p4d_bad(p4d))
1045 return no_page_table(vma, flags, address);
1046
1047 return follow_pud_mask(vma, address, p4dp, flags, ctx);
1048 }
1049
1050 /**
1051 * follow_page_mask - look up a page descriptor from a user-virtual address
1052 * @vma: vm_area_struct mapping @address
1053 * @address: virtual address to look up
1054 * @flags: flags modifying lookup behaviour
1055 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
1056 * pointer to output page_mask
1057 *
1058 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
1059 *
1060 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
1061 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
1062 *
1063 * When getting an anonymous page and the caller has to trigger unsharing
1064 * of a shared anonymous page first, -EMLINK is returned. The caller should
1065 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
1066 * relevant with FOLL_PIN and !FOLL_WRITE.
1067 *
1068 * On output, the @ctx->page_mask is set according to the size of the page.
1069 *
1070 * Return: the mapped (struct page *), %NULL if no mapping exists, or
1071 * an error pointer if there is a mapping to something not represented
1072 * by a page descriptor (see also vm_normal_page()).
1073 */
1074 static struct page *follow_page_mask(struct vm_area_struct *vma,
1075 unsigned long address, unsigned int flags,
1076 struct follow_page_context *ctx)
1077 {
1078 pgd_t *pgd;
1079 struct mm_struct *mm = vma->vm_mm;
1080 struct page *page;
1081
1082 vma_pgtable_walk_begin(vma);
1083
1084 ctx->page_mask = 0;
1085 pgd = pgd_offset(mm, address);
1086
1087 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1088 page = no_page_table(vma, flags, address);
1089 else
1090 page = follow_p4d_mask(vma, address, pgd, flags, ctx);
1091
1092 vma_pgtable_walk_end(vma);
1093
1094 return page;
1095 }
1096
1097 static int get_gate_page(struct mm_struct *mm, unsigned long address,
1098 unsigned int gup_flags, struct vm_area_struct **vma,
1099 struct page **page)
1100 {
1101 pgd_t *pgd;
1102 p4d_t *p4d;
1103 pud_t *pud;
1104 pmd_t *pmd;
1105 pte_t *pte;
1106 pte_t entry;
1107 int ret = -EFAULT;
1108
1109 /* user gate pages are read-only */
1110 if (gup_flags & FOLL_WRITE)
1111 return -EFAULT;
1112 if (address > TASK_SIZE)
1113 pgd = pgd_offset_k(address);
1114 else
1115 pgd = pgd_offset_gate(mm, address);
1116 if (pgd_none(*pgd))
1117 return -EFAULT;
1118 p4d = p4d_offset(pgd, address);
1119 if (p4d_none(*p4d))
1120 return -EFAULT;
1121 pud = pud_offset(p4d, address);
1122 if (pud_none(*pud))
1123 return -EFAULT;
1124 pmd = pmd_offset(pud, address);
1125 if (!pmd_present(*pmd))
1126 return -EFAULT;
1127 pte = pte_offset_map(pmd, address);
1128 if (!pte)
1129 return -EFAULT;
1130 entry = ptep_get(pte);
1131 if (pte_none(entry))
1132 goto unmap;
1133 *vma = get_gate_vma(mm);
1134 if (!page)
1135 goto out;
1136 *page = vm_normal_page(*vma, address, entry);
1137 if (!*page) {
1138 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
1139 goto unmap;
1140 *page = pte_page(entry);
1141 }
1142 ret = try_grab_folio(page_folio(*page), 1, gup_flags);
1143 if (unlikely(ret))
1144 goto unmap;
1145 out:
1146 ret = 0;
1147 unmap:
1148 pte_unmap(pte);
1149 return ret;
1150 }
1151
1152 /*
1153 * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not
1154 * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
1155 * to 0 and -EBUSY returned.
1156 */
1157 static int faultin_page(struct vm_area_struct *vma,
1158 unsigned long address, unsigned int flags, bool unshare,
1159 int *locked)
1160 {
1161 unsigned int fault_flags = 0;
1162 vm_fault_t ret;
1163
1164 if (flags & FOLL_NOFAULT)
1165 return -EFAULT;
1166 if (flags & FOLL_WRITE)
1167 fault_flags |= FAULT_FLAG_WRITE;
1168 if (flags & FOLL_REMOTE)
1169 fault_flags |= FAULT_FLAG_REMOTE;
1170 if (flags & FOLL_UNLOCKABLE) {
1171 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1172 /*
1173 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
1174 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
1175 * That's because some callers may not be prepared to
1176 * handle early exits caused by non-fatal signals.
1177 */
1178 if (flags & FOLL_INTERRUPTIBLE)
1179 fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
1180 }
1181 if (flags & FOLL_NOWAIT)
1182 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
1183 if (flags & FOLL_TRIED) {
1184 /*
1185 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
1186 * can co-exist
1187 */
1188 fault_flags |= FAULT_FLAG_TRIED;
1189 }
1190 if (unshare) {
1191 fault_flags |= FAULT_FLAG_UNSHARE;
1192 /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
1193 VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
1194 }
1195
1196 ret = handle_mm_fault(vma, address, fault_flags, NULL);
1197
1198 if (ret & VM_FAULT_COMPLETED) {
1199 /*
1200 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
1201 * mmap lock in the page fault handler. Sanity check this.
1202 */
1203 WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
1204 *locked = 0;
1205
1206 /*
1207 * We should do the same as VM_FAULT_RETRY, but let's not
1208 * return -EBUSY since that's not reflecting the reality of
1209 * what has happened - we've just fully completed a page
1210 * fault, with the mmap lock released. Use -EAGAIN to show
1211 * that we want to take the mmap lock _again_.
1212 */
1213 return -EAGAIN;
1214 }
1215
1216 if (ret & VM_FAULT_ERROR) {
1217 int err = vm_fault_to_errno(ret, flags);
1218
1219 if (err)
1220 return err;
1221 BUG();
1222 }
1223
1224 if (ret & VM_FAULT_RETRY) {
1225 if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
1226 *locked = 0;
1227 return -EBUSY;
1228 }
1229
1230 return 0;
1231 }
1232
1233 /*
1234 * Writing to file-backed mappings which require folio dirty tracking using GUP
1235 * is a fundamentally broken operation, as kernel write access to GUP mappings
1236 * does not adhere to the semantics expected by a file system.
1237 *
1238 * Consider the following scenario:-
1239 *
1240 * 1. A folio is written to via GUP which write-faults the memory, notifying
1241 * the file system and dirtying the folio.
1242 * 2. Later, writeback is triggered, resulting in the folio being cleaned and
1243 * the PTE being marked read-only.
1244 * 3. The GUP caller writes to the folio, as it is mapped read/write via the
1245 * direct mapping.
1246 * 4. The GUP caller, now done with the page, unpins it and sets it dirty
1247 * (though it does not have to).
1248 *
1249 * This results in both data being written to a folio without writenotify, and
1250 * the folio being dirtied unexpectedly (if the caller decides to do so).
1251 */
1252 static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
1253 unsigned long gup_flags)
1254 {
1255 /*
1256 * If we aren't pinning then no problematic write can occur. A long term
1257 * pin is the most egregious case so this is the case we disallow.
1258 */
1259 if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
1260 (FOLL_PIN | FOLL_LONGTERM))
1261 return true;
1262
1263 /*
1264 * If the VMA does not require dirty tracking then no problematic write
1265 * can occur either.
1266 */
1267 return !vma_needs_dirty_tracking(vma);
1268 }
1269
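/*
 * Example (illustrative): the check below is why a long-term pin for
 * write of a dirty-tracked MAP_SHARED file mapping, e.g.
 *
 *	pin_user_pages_fast(addr, 1, FOLL_WRITE | FOLL_LONGTERM, &page);
 *
 * fails with -EFAULT, while the same call against anonymous, shmem or
 * hugetlbfs memory (no writenotify needed) is allowed.
 */
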
1270 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
1271 {
1272 vm_flags_t vm_flags = vma->vm_flags;
1273 int write = (gup_flags & FOLL_WRITE);
1274 int foreign = (gup_flags & FOLL_REMOTE);
1275 bool vma_anon = vma_is_anonymous(vma);
1276
1277 if (vm_flags & (VM_IO | VM_PFNMAP))
1278 return -EFAULT;
1279
1280 if ((gup_flags & FOLL_ANON) && !vma_anon)
1281 return -EFAULT;
1282
1283 if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
1284 return -EOPNOTSUPP;
1285
1286 if (vma_is_secretmem(vma))
1287 return -EFAULT;
1288
1289 if (write) {
1290 if (!vma_anon &&
1291 !writable_file_mapping_allowed(vma, gup_flags))
1292 return -EFAULT;
1293
1294 if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
1295 if (!(gup_flags & FOLL_FORCE))
1296 return -EFAULT;
1297 /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
1298 if (is_vm_hugetlb_page(vma))
1299 return -EFAULT;
1300 /*
1301 * We used to let the write,force case do COW in a
1302 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
1303 * set a breakpoint in a read-only mapping of an
1304 * executable, without corrupting the file (yet only
1305 * when that file had been opened for writing!).
1306 * Anon pages in shared mappings are surprising: now
1307 * just reject it.
1308 */
1309 if (!is_cow_mapping(vm_flags))
1310 return -EFAULT;
1311 }
1312 } else if (!(vm_flags & VM_READ)) {
1313 if (!(gup_flags & FOLL_FORCE))
1314 return -EFAULT;
1315 /*
1316 * Is there actually any vma we can reach here which does not
1317 * have VM_MAYREAD set?
1318 */
1319 if (!(vm_flags & VM_MAYREAD))
1320 return -EFAULT;
1321 }
1322 /*
1323 * gups are always data accesses, not instruction
1324 * fetches, so execute=false here
1325 */
1326 if (!arch_vma_access_permitted(vma, write, false, foreign))
1327 return -EFAULT;
1328 return 0;
1329 }
1330
1331 /*
1332 * This is "vma_lookup()", but with a warning if we would have
1333 * historically expanded the stack in the GUP code.
1334 */
1335 static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
1336 unsigned long addr)
1337 {
1338 #ifdef CONFIG_STACK_GROWSUP
1339 return vma_lookup(mm, addr);
1340 #else
1341 static volatile unsigned long next_warn;
1342 struct vm_area_struct *vma;
1343 unsigned long now, next;
1344
1345 vma = find_vma(mm, addr);
1346 if (!vma || (addr >= vma->vm_start))
1347 return vma;
1348
1349 /* Only warn for half-way relevant accesses */
1350 if (!(vma->vm_flags & VM_GROWSDOWN))
1351 return NULL;
1352 if (vma->vm_start - addr > 65536)
1353 return NULL;
1354
1355 /* Let's not warn more than once an hour.. */
1356 now = jiffies; next = next_warn;
1357 if (next && time_before(now, next))
1358 return NULL;
1359 next_warn = now + 60*60*HZ;
1360
1361 /* Let people know things may have changed. */
1362 pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
1363 current->comm, task_pid_nr(current),
1364 vma->vm_start, vma->vm_end, addr);
1365 dump_stack();
1366 return NULL;
1367 #endif
1368 }
1369
1370 /**
1371 * __get_user_pages() - pin user pages in memory
1372 * @mm: mm_struct of target mm
1373 * @start: starting user address
1374 * @nr_pages: number of pages from start to pin
1375 * @gup_flags: flags modifying pin behaviour
1376 * @pages: array that receives pointers to the pages pinned.
1377 * Should be at least nr_pages long. Or NULL, if caller
1378 * only intends to ensure the pages are faulted in.
1379 * @locked: whether we're still with the mmap_lock held
1380 *
1381 * Returns either number of pages pinned (which may be less than the
1382 * number requested), or an error. Details about the return value:
1383 *
1384 * -- If nr_pages is 0, returns 0.
1385 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1386 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1387 * pages pinned. Again, this may be less than nr_pages.
1388 * -- 0 return value is possible when the fault would need to be retried.
1389 *
1390 * The caller is responsible for releasing returned @pages, via put_page().
1391 *
1392 * Must be called with mmap_lock held. It may be released. See below.
1393 *
1394 * __get_user_pages walks a process's page tables and takes a reference to
1395 * each struct page that each user address corresponds to at a given
1396 * instant. That is, it takes the page that would be accessed if a user
1397 * thread accesses the given user virtual address at that instant.
1398 *
1399 * This does not guarantee that the page exists in the user mappings when
1400 * __get_user_pages returns, and there may even be a completely different
1401 * page there in some cases (eg. if mmapped pagecache has been invalidated
1402 * and subsequently re-faulted). However it does guarantee that the page
1403 * won't be freed completely. And mostly callers simply care that the page
1404 * contains data that was valid *at some point in time*. Typically, an IO
1405 * or similar operation cannot guarantee anything stronger anyway because
1406 * locks can't be held over the syscall boundary.
1407 *
1408 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1409 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1410 * appropriate) must be called after the page is finished with, and
1411 * before put_page is called.
1412 *
1413 * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
1414 * be released. If this happens *@locked will be set to 0 on return.
1415 *
1416 * A caller using such a combination of @gup_flags must therefore hold the
1417 * mmap_lock for reading only, and recognize when it's been released. Otherwise,
1418 * it must be held for either reading or writing and will not be released.
1419 *
1420 * In most cases, get_user_pages or get_user_pages_fast should be used
1421 * instead of __get_user_pages. __get_user_pages should be used only if
1422 * you need some special @gup_flags.
1423 */
1424 static long __get_user_pages(struct mm_struct *mm,
1425 unsigned long start, unsigned long nr_pages,
1426 unsigned int gup_flags, struct page **pages,
1427 int *locked)
1428 {
1429 long ret = 0, i = 0;
1430 struct vm_area_struct *vma = NULL;
1431 struct follow_page_context ctx = { NULL };
1432
1433 if (!nr_pages)
1434 return 0;
1435
1436 start = untagged_addr_remote(mm, start);
1437
1438 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1439
1440 do {
1441 struct page *page;
1442 unsigned int page_increm;
1443
1444 /* first iteration, or we crossed a vma boundary */
1445 if (!vma || start >= vma->vm_end) {
1446 /*
1447 * MADV_POPULATE_(READ|WRITE) wants to handle VMA
1448 * lookups+error reporting differently.
1449 */
1450 if (gup_flags & FOLL_MADV_POPULATE) {
1451 vma = vma_lookup(mm, start);
1452 if (!vma) {
1453 ret = -ENOMEM;
1454 goto out;
1455 }
1456 if (check_vma_flags(vma, gup_flags)) {
1457 ret = -EINVAL;
1458 goto out;
1459 }
1460 goto retry;
1461 }
1462 vma = gup_vma_lookup(mm, start);
1463 if (!vma && in_gate_area(mm, start)) {
1464 ret = get_gate_page(mm, start & PAGE_MASK,
1465 gup_flags, &vma,
1466 pages ? &page : NULL);
1467 if (ret)
1468 goto out;
1469 ctx.page_mask = 0;
1470 goto next_page;
1471 }
1472
1473 if (!vma) {
1474 ret = -EFAULT;
1475 goto out;
1476 }
1477 ret = check_vma_flags(vma, gup_flags);
1478 if (ret)
1479 goto out;
1480 }
1481 retry:
1482 /*
1483 * If we have a pending SIGKILL, don't keep faulting pages and
1484 * potentially allocating memory.
1485 */
1486 if (fatal_signal_pending(current)) {
1487 ret = -EINTR;
1488 goto out;
1489 }
1490 cond_resched();
1491
1492 page = follow_page_mask(vma, start, gup_flags, &ctx);
1493 if (!page || PTR_ERR(page) == -EMLINK) {
1494 ret = faultin_page(vma, start, gup_flags,
1495 PTR_ERR(page) == -EMLINK, locked);
1496 switch (ret) {
1497 case 0:
1498 goto retry;
1499 case -EBUSY:
1500 case -EAGAIN:
1501 ret = 0;
1502 fallthrough;
1503 case -EFAULT:
1504 case -ENOMEM:
1505 case -EHWPOISON:
1506 goto out;
1507 }
1508 BUG();
1509 } else if (PTR_ERR(page) == -EEXIST) {
1510 /*
1511 * Proper page table entry exists, but no corresponding
1512 * struct page. If the caller expects **pages to be
1513 * filled in, bail out now, because that can't be done
1514 * for this page.
1515 */
1516 if (pages) {
1517 ret = PTR_ERR(page);
1518 goto out;
1519 }
1520 } else if (IS_ERR(page)) {
1521 ret = PTR_ERR(page);
1522 goto out;
1523 }
1524 next_page:
1525 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1526 if (page_increm > nr_pages)
1527 page_increm = nr_pages;
1528
1529 if (pages) {
1530 struct page *subpage;
1531 unsigned int j;
1532
1533 /*
1534 * This must be a large folio (and doesn't need to
1535 * be the whole folio; it can be part of it), do
1536 * the refcount work for all the subpages too.
1537 *
1538 * NOTE: here the page may not be the head page
1539 * e.g. when start addr is not thp-size aligned.
1540 * try_grab_folio() should have taken care of tail
1541 * pages.
1542 */
1543 if (page_increm > 1) {
1544 struct folio *folio = page_folio(page);
1545
1546 /*
1547 * Since we already hold refcount on the
1548 * large folio, this should never fail.
1549 */
1550 if (try_grab_folio(folio, page_increm - 1,
1551 gup_flags)) {
1552 /*
1553 * Release the 1st page ref if the
1554 * folio is problematic, fail hard.
1555 */
1556 gup_put_folio(folio, 1, gup_flags);
1557 ret = -EFAULT;
1558 goto out;
1559 }
1560 }
1561
1562 for (j = 0; j < page_increm; j++) {
1563 subpage = nth_page(page, j);
1564 pages[i + j] = subpage;
1565 flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
1566 flush_dcache_page(subpage);
1567 }
1568 }
1569
1570 i += page_increm;
1571 start += page_increm * PAGE_SIZE;
1572 nr_pages -= page_increm;
1573 } while (nr_pages);
1574 out:
1575 if (ctx.pgmap)
1576 put_dev_pagemap(ctx.pgmap);
1577 return i ? i : ret;
1578 }
1579
1580 static bool vma_permits_fault(struct vm_area_struct *vma,
1581 unsigned int fault_flags)
1582 {
1583 bool write = !!(fault_flags & FAULT_FLAG_WRITE);
1584 bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1585 vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1586
1587 if (!(vm_flags & vma->vm_flags))
1588 return false;
1589
1590 /*
1591 * The architecture might have a hardware protection
1592 * mechanism other than read/write that can deny access.
1593 *
1594 * gup always represents data access, not instruction
1595 * fetches, so execute=false here:
1596 */
1597 if (!arch_vma_access_permitted(vma, write, false, foreign))
1598 return false;
1599
1600 return true;
1601 }
1602
1603 /**
1604 * fixup_user_fault() - manually resolve a user page fault
1605 * @mm: mm_struct of target mm
1606 * @address: user address
1607 * @fault_flags:flags to pass down to handle_mm_fault()
1608 * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
1609 * does not allow retry. If NULL, the caller must guarantee
1610 * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1611 *
1612 * This is meant to be called in the specific scenario where for locking reasons
1613 * we try to access user memory in atomic context (within a pagefault_disable()
1614 * section), the access returns -EFAULT, and we want to resolve the user fault before
1615 * trying again.
1616 *
1617 * Typically this is meant to be used by the futex code.
1618 *
1619 * The main difference with get_user_pages() is that this function will
1620 * unconditionally call handle_mm_fault() which will in turn perform all the
1621 * necessary SW fixup of the dirty and young bits in the PTE, while
1622 * get_user_pages() only guarantees to update these in the struct page.
1623 *
1624 * This is important for some architectures where those bits also gate the
1625 * access permission to the page because they are maintained in software. On
1626 * such architectures, gup() will not be enough to make a subsequent access
1627 * succeed.
1628 *
1629 * This function will not return with an unlocked mmap_lock. So it does not
1630 * have the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
1631 */
1632 int fixup_user_fault(struct mm_struct *mm,
1633 unsigned long address, unsigned int fault_flags,
1634 bool *unlocked)
1635 {
1636 struct vm_area_struct *vma;
1637 vm_fault_t ret;
1638
1639 address = untagged_addr_remote(mm, address);
1640
1641 if (unlocked)
1642 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1643
1644 retry:
1645 vma = gup_vma_lookup(mm, address);
1646 if (!vma)
1647 return -EFAULT;
1648
1649 if (!vma_permits_fault(vma, fault_flags))
1650 return -EFAULT;
1651
1652 if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1653 fatal_signal_pending(current))
1654 return -EINTR;
1655
1656 ret = handle_mm_fault(vma, address, fault_flags, NULL);
1657
1658 if (ret & VM_FAULT_COMPLETED) {
1659 /*
1660 * NOTE: it's a pity that we need to retake the lock here
1661 * to pair with the unlock() in the callers. Ideally we
1662 * could tell the callers so they do not need to unlock.
1663 */
1664 mmap_read_lock(mm);
1665 *unlocked = true;
1666 return 0;
1667 }
1668
1669 if (ret & VM_FAULT_ERROR) {
1670 int err = vm_fault_to_errno(ret, 0);
1671
1672 if (err)
1673 return err;
1674 BUG();
1675 }
1676
1677 if (ret & VM_FAULT_RETRY) {
1678 mmap_read_lock(mm);
1679 *unlocked = true;
1680 fault_flags |= FAULT_FLAG_TRIED;
1681 goto retry;
1682 }
1683
1684 return 0;
1685 }
1686 EXPORT_SYMBOL_GPL(fixup_user_fault);
1687
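/*
 * Example (illustrative sketch of the futex-style usage described
 * above): an atomic user access fails under pagefault_disable(), and
 * the fault is resolved explicitly before retrying:
 *
 *	bool unlocked = false;
 *	int ret;
 *
 *	mmap_read_lock(mm);
 *	ret = fixup_user_fault(mm, uaddr, FAULT_FLAG_WRITE, &unlocked);
 *	mmap_read_unlock(mm);
 *	if (ret)
 *		return ret;
 *	// now retry the pagefault_disable()'d access
 */
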
1688 /*
1689 * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is
1690 * specified, it'll also respond to generic signals. The caller of GUP
1691 * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
1692 */
1693 static bool gup_signal_pending(unsigned int flags)
1694 {
1695 if (fatal_signal_pending(current))
1696 return true;
1697
1698 if (!(flags & FOLL_INTERRUPTIBLE))
1699 return false;
1700
1701 return signal_pending(current);
1702 }
1703
1704 /*
1705 * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
1706 * the caller. This function may drop the mmap_lock. If it does so, then it will
1707 * set (*locked = 0).
1708 *
1709 * (*locked == 0) means that the caller expects this function to acquire and
1710 * drop the mmap_lock. Therefore, the value of *locked will still be zero when
1711 * the function returns, even though it may have changed temporarily during
1712 * function execution.
1713 *
1714 * Please note that this function, unlike __get_user_pages(), will not return 0
1715 * for nr_pages > 0, unless FOLL_NOWAIT is used.
1716 */
1717 static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1718 unsigned long start,
1719 unsigned long nr_pages,
1720 struct page **pages,
1721 int *locked,
1722 unsigned int flags)
1723 {
1724 long ret, pages_done;
1725 bool must_unlock = false;
1726
1727 if (!nr_pages)
1728 return 0;
1729
1730 /*
1731 * The internal caller expects GUP to manage the lock internally and the
1732 * lock must be released when this returns.
1733 */
1734 if (!*locked) {
1735 if (mmap_read_lock_killable(mm))
1736 return -EAGAIN;
1737 must_unlock = true;
1738 *locked = 1;
1739 }
1740 else
1741 mmap_assert_locked(mm);
1742
1743 if (flags & FOLL_PIN)
1744 mm_set_has_pinned_flag(&mm->flags);
1745
1746 /*
1747 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1748 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1749 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1750 * for FOLL_GET, not for the newer FOLL_PIN.
1751 *
1752 * FOLL_PIN always expects pages to be non-null, but no need to assert
1753 * that here, as any failures will be obvious enough.
1754 */
1755 if (pages && !(flags & FOLL_PIN))
1756 flags |= FOLL_GET;
1757
1758 pages_done = 0;
1759 for (;;) {
1760 ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1761 locked);
1762 if (!(flags & FOLL_UNLOCKABLE)) {
1763 /* VM_FAULT_RETRY couldn't trigger, bypass */
1764 pages_done = ret;
1765 break;
1766 }
1767
1768 /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
1769 if (!*locked) {
1770 BUG_ON(ret < 0);
1771 BUG_ON(ret >= nr_pages);
1772 }
1773
1774 if (ret > 0) {
1775 nr_pages -= ret;
1776 pages_done += ret;
1777 if (!nr_pages)
1778 break;
1779 }
1780 if (*locked) {
1781 /*
1782 * VM_FAULT_RETRY didn't trigger or it was a
1783 * FOLL_NOWAIT.
1784 */
1785 if (!pages_done)
1786 pages_done = ret;
1787 break;
1788 }
1789 /*
1790 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1791 * For the prefault case (!pages) we only update counts.
1792 */
1793 if (likely(pages))
1794 pages += ret;
1795 start += ret << PAGE_SHIFT;
1796
1797 /* The lock was temporarily dropped, so we must unlock later */
1798 must_unlock = true;
1799
1800 retry:
1801 /*
1802 * Repeat on the address that fired VM_FAULT_RETRY
1803 * with both FAULT_FLAG_ALLOW_RETRY and
1804 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
1805 * by fatal signals or even common signals, depending on
1806 * the caller's request. So we need to check it before we
1807 * start trying again otherwise it can loop forever.
1808 */
1809 if (gup_signal_pending(flags)) {
1810 if (!pages_done)
1811 pages_done = -EINTR;
1812 break;
1813 }
1814
1815 ret = mmap_read_lock_killable(mm);
1816 if (ret) {
1817 BUG_ON(ret > 0);
1818 if (!pages_done)
1819 pages_done = ret;
1820 break;
1821 }
1822
1823 *locked = 1;
1824 ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1825 pages, locked);
1826 if (!*locked) {
1827 /* Continue to retry until we succeed */
1828 BUG_ON(ret != 0);
1829 goto retry;
1830 }
1831 if (ret != 1) {
1832 BUG_ON(ret > 1);
1833 if (!pages_done)
1834 pages_done = ret;
1835 break;
1836 }
1837 nr_pages--;
1838 pages_done++;
1839 if (!nr_pages)
1840 break;
1841 if (likely(pages))
1842 pages++;
1843 start += PAGE_SIZE;
1844 }
1845 if (must_unlock && *locked) {
1846 /*
1847 * We either temporarily dropped the lock, or the caller
1848 * requested that we both acquire and drop the lock. Either way,
1849 * we must now unlock, and notify the caller of that state.
1850 */
1851 mmap_read_unlock(mm);
1852 *locked = 0;
1853 }
1854
1855 /*
1856 * Failing to pin anything implies something has gone wrong (except when
1857 * FOLL_NOWAIT is specified).
1858 */
1859 if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
1860 return -EFAULT;
1861
1862 return pages_done;
1863 }
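
/*
 * Illustrative sketch (editorial addition, hypothetical internal caller): how
 * the two locking conventions described above are selected. Passing a local
 * locked == 0 asks __get_user_pages_locked() to take and drop the mmap_lock
 * itself; passing locked == 1 means the caller already holds it for read.
 */
#if 0
static long example_gup_locked_caller(struct mm_struct *mm, unsigned long start,
				      unsigned long nr_pages, struct page **pages,
				      unsigned int gup_flags)
{
	int locked = 0;	/* GUP manages the mmap_lock internally */

	return __get_user_pages_locked(mm, start, nr_pages, pages, &locked,
				       gup_flags);
}
#endif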
1864
1865 /**
1866 * populate_vma_page_range() - populate a range of pages in the vma.
1867 * @vma: target vma
1868 * @start: start address
1869 * @end: end address
1870 * @locked: whether the mmap_lock is still held
1871 *
1872 * This takes care of mlocking the pages too if VM_LOCKED is set.
1873 *
1874 * Return either number of pages pinned in the vma, or a negative error
1875 * code on error.
1876 *
1877 * vma->vm_mm->mmap_lock must be held.
1878 *
1879 * If @locked is NULL, it may be held for read or write and will
1880 * be unperturbed.
1881 *
1882 * If @locked is non-NULL, it must be held for read only and may be
1883 * released. If it's released, *@locked will be set to 0.
1884 */
1885 long populate_vma_page_range(struct vm_area_struct *vma,
1886 unsigned long start, unsigned long end, int *locked)
1887 {
1888 struct mm_struct *mm = vma->vm_mm;
1889 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1890 int local_locked = 1;
1891 int gup_flags;
1892 long ret;
1893
1894 VM_BUG_ON(!PAGE_ALIGNED(start));
1895 VM_BUG_ON(!PAGE_ALIGNED(end));
1896 VM_BUG_ON_VMA(start < vma->vm_start, vma);
1897 VM_BUG_ON_VMA(end > vma->vm_end, vma);
1898 mmap_assert_locked(mm);
1899
1900 /*
1901 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
1902 * faultin_page() to break COW, so it has no work to do here.
1903 */
1904 if (vma->vm_flags & VM_LOCKONFAULT)
1905 return nr_pages;
1906
1907 /* ... similarly, we've never faulted in PROT_NONE pages */
1908 if (!vma_is_accessible(vma))
1909 return -EFAULT;
1910
1911 gup_flags = FOLL_TOUCH;
1912 /*
1913 * We want to touch writable mappings with a write fault in order
1914 * to break COW, except for shared mappings because these don't COW
1915 * and we would not want to dirty them for nothing.
1916 *
1917 * Otherwise, do a read fault, and use FOLL_FORCE in case it's not
1918 * readable (ie write-only or executable).
1919 */
1920 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1921 gup_flags |= FOLL_WRITE;
1922 else
1923 gup_flags |= FOLL_FORCE;
1924
1925 if (locked)
1926 gup_flags |= FOLL_UNLOCKABLE;
1927
1928 /*
1929 * We made sure addr is within a VMA, so the following will
1930 * not result in a stack expansion that recurses back here.
1931 */
1932 ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1933 NULL, locked ? locked : &local_locked);
1934 lru_add_drain();
1935 return ret;
1936 }
1937
1938 /*
1939 * faultin_page_range() - populate (prefault) page tables inside the
1940 * given range readable/writable
1941 *
1942 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1943 *
1944 * @mm: the mm to populate page tables in
1945 * @start: start address
1946 * @end: end address
1947 * @write: whether to prefault readable or writable
1948 * @locked: whether the mmap_lock is still held
1949 *
1950 * Returns either number of processed pages in the MM, or a negative error
1951 * code on error (see __get_user_pages()). Note that this function reports
1952 * errors related to VMAs, such as incompatible mappings, as expected by
1953 * MADV_POPULATE_(READ|WRITE).
1954 *
1955 * The range must be page-aligned.
1956 *
1957 * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
1958 */
1959 long faultin_page_range(struct mm_struct *mm, unsigned long start,
1960 unsigned long end, bool write, int *locked)
1961 {
1962 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1963 int gup_flags;
1964 long ret;
1965
1966 VM_BUG_ON(!PAGE_ALIGNED(start));
1967 VM_BUG_ON(!PAGE_ALIGNED(end));
1968 mmap_assert_locked(mm);
1969
1970 /*
1971 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1972 * the page dirty with FOLL_WRITE -- which doesn't make a
1973 * difference with !FOLL_FORCE, because the page is writable
1974 * in the page table.
1975 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1976 * a poisoned page.
1977 * !FOLL_FORCE: Require proper access permissions.
1978 */
1979 gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
1980 FOLL_MADV_POPULATE;
1981 if (write)
1982 gup_flags |= FOLL_WRITE;
1983
1984 ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
1985 gup_flags);
1986 lru_add_drain();
1987 return ret;
1988 }
1989
1990 /*
1991 * __mm_populate - populate and/or mlock pages within a range of address space.
1992 *
1993 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1994 * flags. VMAs must be already marked with the desired vm_flags, and
1995 * mmap_lock must not be held.
1996 */
1997 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1998 {
1999 struct mm_struct *mm = current->mm;
2000 unsigned long end, nstart, nend;
2001 struct vm_area_struct *vma = NULL;
2002 int locked = 0;
2003 long ret = 0;
2004
2005 end = start + len;
2006
2007 for (nstart = start; nstart < end; nstart = nend) {
2008 /*
2009 * We want to fault in pages for [nstart; end) address range.
2010 * Find first corresponding VMA.
2011 */
2012 if (!locked) {
2013 locked = 1;
2014 mmap_read_lock(mm);
2015 vma = find_vma_intersection(mm, nstart, end);
2016 } else if (nstart >= vma->vm_end)
2017 vma = find_vma_intersection(mm, vma->vm_end, end);
2018
2019 if (!vma)
2020 break;
2021 /*
2022 * Set [nstart; nend) to intersection of desired address
2023 * range with the first VMA. Also, skip undesirable VMA types.
2024 */
2025 nend = min(end, vma->vm_end);
2026 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
2027 continue;
2028 if (nstart < vma->vm_start)
2029 nstart = vma->vm_start;
2030 /*
2031 * Now fault in a range of pages. populate_vma_page_range()
2032 * double checks the vma flags, so that it won't mlock pages
2033 * if the vma was already munlocked.
2034 */
2035 ret = populate_vma_page_range(vma, nstart, nend, &locked);
2036 if (ret < 0) {
2037 if (ignore_errors) {
2038 ret = 0;
2039 continue; /* continue at next VMA */
2040 }
2041 break;
2042 }
2043 nend = nstart + ret * PAGE_SIZE;
2044 ret = 0;
2045 }
2046 if (locked)
2047 mmap_read_unlock(mm);
2048 return ret; /* 0 or negative error code */
2049 }
2050 #else /* CONFIG_MMU */
2051 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
2052 unsigned long nr_pages, struct page **pages,
2053 int *locked, unsigned int foll_flags)
2054 {
2055 struct vm_area_struct *vma;
2056 bool must_unlock = false;
2057 unsigned long vm_flags;
2058 long i;
2059
2060 if (!nr_pages)
2061 return 0;
2062
2063 /*
2064 * The internal caller expects GUP to manage the lock internally and the
2065 * lock must be released when this returns.
2066 */
2067 if (!*locked) {
2068 if (mmap_read_lock_killable(mm))
2069 return -EAGAIN;
2070 must_unlock = true;
2071 *locked = 1;
2072 }
2073
2074 /* calculate required read or write permissions.
2075 * If FOLL_FORCE is set, we only require the "MAY" flags.
2076 */
2077 vm_flags = (foll_flags & FOLL_WRITE) ?
2078 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
2079 vm_flags &= (foll_flags & FOLL_FORCE) ?
2080 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
2081
2082 for (i = 0; i < nr_pages; i++) {
2083 vma = find_vma(mm, start);
2084 if (!vma)
2085 break;
2086
2087 /* protect what we can, including chardevs */
2088 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
2089 !(vm_flags & vma->vm_flags))
2090 break;
2091
2092 if (pages) {
2093 pages[i] = virt_to_page((void *)start);
2094 if (pages[i])
2095 get_page(pages[i]);
2096 }
2097
2098 start = (start + PAGE_SIZE) & PAGE_MASK;
2099 }
2100
2101 if (must_unlock && *locked) {
2102 mmap_read_unlock(mm);
2103 *locked = 0;
2104 }
2105
2106 return i ? : -EFAULT;
2107 }
2108 #endif /* !CONFIG_MMU */
2109
2110 /**
2111 * fault_in_writeable - fault in userspace address range for writing
2112 * @uaddr: start of address range
2113 * @size: size of address range
2114 *
2115 * Returns the number of bytes not faulted in (like copy_to_user() and
2116 * copy_from_user()).
2117 */
2118 size_t fault_in_writeable(char __user *uaddr, size_t size)
2119 {
2120 char __user *start = uaddr, *end;
2121
2122 if (unlikely(size == 0))
2123 return 0;
2124 if (!user_write_access_begin(uaddr, size))
2125 return size;
2126 if (!PAGE_ALIGNED(uaddr)) {
2127 unsafe_put_user(0, uaddr, out);
2128 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
2129 }
2130 end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
2131 if (unlikely(end < start))
2132 end = NULL;
2133 while (uaddr != end) {
2134 unsafe_put_user(0, uaddr, out);
2135 uaddr += PAGE_SIZE;
2136 }
2137
2138 out:
2139 user_write_access_end();
2140 if (size > uaddr - start)
2141 return size - (uaddr - start);
2142 return 0;
2143 }
2144 EXPORT_SYMBOL(fault_in_writeable);
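
/*
 * Illustrative sketch (editorial addition, hypothetical helper): the retry
 * pattern fault_in_writeable() is intended for. A copy attempted with page
 * faults disabled may fail; the destination is then faulted in and the copy
 * is retried.
 */
#if 0
static ssize_t example_copy_out(void __user *dst, const void *src, size_t len)
{
	for (;;) {
		unsigned long left;

		pagefault_disable();
		left = copy_to_user(dst, src, len);
		pagefault_enable();
		if (!left)
			return len;

		/* Part of the destination cannot be faulted in: give up. */
		if (fault_in_writeable((char __user *)dst, len))
			return -EFAULT;
	}
}
#endif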
2145
2146 /**
2147 * fault_in_subpage_writeable - fault in an address range for writing
2148 * @uaddr: start of address range
2149 * @size: size of address range
2150 *
2151 * Fault in a user address range for writing while checking for permissions at
2152 * sub-page granularity (e.g. arm64 MTE). This function should be used when
2153 * the caller cannot guarantee forward progress of a copy_to_user() loop.
2154 *
2155 * Returns the number of bytes not faulted in (like copy_to_user() and
2156 * copy_from_user()).
2157 */
2158 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
2159 {
2160 size_t faulted_in;
2161
2162 /*
2163 * Attempt faulting in at page granularity first for page table
2164 * permission checking. The arch-specific probe_subpage_writeable()
2165 * functions may not check for this.
2166 */
2167 faulted_in = size - fault_in_writeable(uaddr, size);
2168 if (faulted_in)
2169 faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
2170
2171 return size - faulted_in;
2172 }
2173 EXPORT_SYMBOL(fault_in_subpage_writeable);
2174
2175 /*
2176 * fault_in_safe_writeable - fault in an address range for writing
2177 * @uaddr: start of address range
2178 * @size: length of address range
2179 *
2180 * Faults in an address range for writing. This is primarily useful when we
2181 * already know that some or all of the pages in the address range aren't in
2182 * memory.
2183 *
2184 * Unlike fault_in_writeable(), this function is non-destructive.
2185 *
2186 * Note that we don't pin or otherwise hold the pages referenced that we fault
2187 * in. There's no guarantee that they'll stay in memory for any duration of
2188 * time.
2189 *
2190 * Returns the number of bytes not faulted in, like copy_to_user() and
2191 * copy_from_user().
2192 */
2193 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
2194 {
2195 unsigned long start = (unsigned long)uaddr, end;
2196 struct mm_struct *mm = current->mm;
2197 bool unlocked = false;
2198
2199 if (unlikely(size == 0))
2200 return 0;
2201 end = PAGE_ALIGN(start + size);
2202 if (end < start)
2203 end = 0;
2204
2205 mmap_read_lock(mm);
2206 do {
2207 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
2208 break;
2209 start = (start + PAGE_SIZE) & PAGE_MASK;
2210 } while (start != end);
2211 mmap_read_unlock(mm);
2212
2213 if (size > (unsigned long)uaddr - start)
2214 return size - ((unsigned long)uaddr - start);
2215 return 0;
2216 }
2217 EXPORT_SYMBOL(fault_in_safe_writeable);
2218
2219 /**
2220 * fault_in_readable - fault in userspace address range for reading
2221 * @uaddr: start of user address range
2222 * @size: size of user address range
2223 *
2224 * Returns the number of bytes not faulted in (like copy_to_user() and
2225 * copy_from_user()).
2226 */
2227 size_t fault_in_readable(const char __user *uaddr, size_t size)
2228 {
2229 const char __user *start = uaddr, *end;
2230 volatile char c;
2231
2232 if (unlikely(size == 0))
2233 return 0;
2234 if (!user_read_access_begin(uaddr, size))
2235 return size;
2236 if (!PAGE_ALIGNED(uaddr)) {
2237 unsafe_get_user(c, uaddr, out);
2238 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
2239 }
2240 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
2241 if (unlikely(end < start))
2242 end = NULL;
2243 while (uaddr != end) {
2244 unsafe_get_user(c, uaddr, out);
2245 uaddr += PAGE_SIZE;
2246 }
2247
2248 out:
2249 user_read_access_end();
2250 (void)c;
2251 if (size > uaddr - start)
2252 return size - (uaddr - start);
2253 return 0;
2254 }
2255 EXPORT_SYMBOL(fault_in_readable);
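
/*
 * Illustrative sketch (editorial addition, hypothetical helper): the read-side
 * counterpart, as used by buffered-write paths that copy user data with page
 * faults disabled and fault the source range in before retrying.
 */
#if 0
static ssize_t example_copy_in(void *dst, const char __user *src, size_t len)
{
	for (;;) {
		unsigned long left;

		pagefault_disable();
		left = copy_from_user(dst, src, len);
		pagefault_enable();
		if (!left)
			return len;

		/* Part of the source cannot be faulted in: give up. */
		if (fault_in_readable(src, len))
			return -EFAULT;
	}
}
#endif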
2256
2257 /**
2258 * get_dump_page() - pin user page in memory while writing it to core dump
2259 * @addr: user address
2260 *
2261 * Returns struct page pointer of user page pinned for dump,
2262 * to be freed afterwards by put_page().
2263 *
2264 * Returns NULL on any kind of failure - a hole must then be inserted into
2265 * the corefile, to preserve alignment with its headers; and also returns
2266 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
2267 * allowing a hole to be left in the corefile to save disk space.
2268 *
2269 * Called without mmap_lock (takes and releases the mmap_lock by itself).
2270 */
2271 #ifdef CONFIG_ELF_CORE
2272 struct page *get_dump_page(unsigned long addr)
2273 {
2274 struct page *page;
2275 int locked = 0;
2276 int ret;
2277
2278 ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
2279 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
2280 return (ret == 1) ? page : NULL;
2281 }
2282 #endif /* CONFIG_ELF_CORE */
2283
2284 #ifdef CONFIG_MIGRATION
2285
2286 /*
2287 * An array of either pages or folios ("pofs"). Although it may seem tempting to
2288 * avoid this complication, by simply interpreting a list of folios as a list of
2289 * pages, that approach won't work in the longer term, because eventually the
2290 * layouts of struct page and struct folio will become completely different.
2291 * Furthermore, this pof approach avoids excessive page_folio() calls.
2292 */
2293 struct pages_or_folios {
2294 union {
2295 struct page **pages;
2296 struct folio **folios;
2297 void **entries;
2298 };
2299 bool has_folios;
2300 long nr_entries;
2301 };
2302
2303 static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i)
2304 {
2305 if (pofs->has_folios)
2306 return pofs->folios[i];
2307 return page_folio(pofs->pages[i]);
2308 }
2309
2310 static void pofs_clear_entry(struct pages_or_folios *pofs, long i)
2311 {
2312 pofs->entries[i] = NULL;
2313 }
2314
2315 static void pofs_unpin(struct pages_or_folios *pofs)
2316 {
2317 if (pofs->has_folios)
2318 unpin_folios(pofs->folios, pofs->nr_entries);
2319 else
2320 unpin_user_pages(pofs->pages, pofs->nr_entries);
2321 }
2322
2323 /*
2324 * Returns the number of collected folios. Return value is always >= 0.
2325 */
2326 static unsigned long collect_longterm_unpinnable_folios(
2327 struct list_head *movable_folio_list,
2328 struct pages_or_folios *pofs)
2329 {
2330 unsigned long i, collected = 0;
2331 struct folio *prev_folio = NULL;
2332 bool drain_allow = true;
2333
2334 for (i = 0; i < pofs->nr_entries; i++) {
2335 struct folio *folio = pofs_get_folio(pofs, i);
2336
2337 if (folio == prev_folio)
2338 continue;
2339 prev_folio = folio;
2340
2341 if (folio_is_longterm_pinnable(folio))
2342 continue;
2343
2344 collected++;
2345
2346 if (folio_is_device_coherent(folio))
2347 continue;
2348
2349 if (folio_test_hugetlb(folio)) {
2350 isolate_hugetlb(folio, movable_folio_list);
2351 continue;
2352 }
2353
2354 if (!folio_test_lru(folio) && drain_allow) {
2355 lru_add_drain_all();
2356 drain_allow = false;
2357 }
2358
2359 if (!folio_isolate_lru(folio))
2360 continue;
2361
2362 list_add_tail(&folio->lru, movable_folio_list);
2363 node_stat_mod_folio(folio,
2364 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2365 folio_nr_pages(folio));
2366 }
2367
2368 return collected;
2369 }
2370
2371 /*
2372 * Unpins all folios and migrates device coherent folios and the folios on movable_folio_list.
2373 * Returns -EAGAIN if all folios were successfully migrated or -errno for
2374 * failure (or partial success).
2375 */
2376 static int
2377 migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list,
2378 struct pages_or_folios *pofs)
2379 {
2380 int ret;
2381 unsigned long i;
2382
2383 for (i = 0; i < pofs->nr_entries; i++) {
2384 struct folio *folio = pofs_get_folio(pofs, i);
2385
2386 if (folio_is_device_coherent(folio)) {
2387 /*
2388 * Migration will fail if the folio is pinned, so
2389 * convert the pin on the source folio to a normal
2390 * reference.
2391 */
2392 pofs_clear_entry(pofs, i);
2393 folio_get(folio);
2394 gup_put_folio(folio, 1, FOLL_PIN);
2395
2396 if (migrate_device_coherent_folio(folio)) {
2397 ret = -EBUSY;
2398 goto err;
2399 }
2400
2401 continue;
2402 }
2403
2404 /*
2405 * We can't migrate folios with unexpected references, so drop
2406 * the reference obtained by __get_user_pages_locked().
2407 * Migrating folios have been added to movable_folio_list after
2408 * calling folio_isolate_lru() which takes a reference so the
2409 * folio won't be freed if it's migrating.
2410 */
2411 unpin_folio(folio);
2412 pofs_clear_entry(pofs, i);
2413 }
2414
2415 if (!list_empty(movable_folio_list)) {
2416 struct migration_target_control mtc = {
2417 .nid = NUMA_NO_NODE,
2418 .gfp_mask = GFP_USER | __GFP_NOWARN,
2419 .reason = MR_LONGTERM_PIN,
2420 };
2421
2422 if (migrate_pages(movable_folio_list, alloc_migration_target,
2423 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
2424 MR_LONGTERM_PIN, NULL)) {
2425 ret = -ENOMEM;
2426 goto err;
2427 }
2428 }
2429
2430 putback_movable_pages(movable_folio_list);
2431
2432 return -EAGAIN;
2433
2434 err:
2435 pofs_unpin(pofs);
2436 putback_movable_pages(movable_folio_list);
2437
2438 return ret;
2439 }
2440
2441 static long
2442 check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
2443 {
2444 LIST_HEAD(movable_folio_list);
2445 unsigned long collected;
2446
2447 collected = collect_longterm_unpinnable_folios(&movable_folio_list,
2448 pofs);
2449 if (!collected)
2450 return 0;
2451
2452 return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
2453 }
2454
2455 /*
2456 * Check whether all folios are *allowed* to be pinned indefinitely (long term).
2457 * Rather confusingly, all folios in the range are required to be pinned via
2458 * FOLL_PIN, before calling this routine.
2459 *
2460 * Return values:
2461 *
2462 * 0: if everything is OK and all folios in the range are allowed to be pinned,
2463 * then this routine leaves all folios pinned and returns zero for success.
2464 *
2465 * -EAGAIN: if any folios in the range are not allowed to be pinned, then this
2466 * routine will migrate those folios away and unpin all the folios in the range. If
2467 * migration of the entire set of folios succeeds, then -EAGAIN is returned. The
2468 * caller should re-pin the entire range with FOLL_PIN and then call this
2469 * routine again.
2470 *
2471 * -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this
2472 * indicates a migration failure. The caller should give up, and propagate the
2473 * error back up the call stack. The caller does not need to unpin any folios in
2474 * that case, because this routine will do the unpinning.
2475 */
2476 static long check_and_migrate_movable_folios(unsigned long nr_folios,
2477 struct folio **folios)
2478 {
2479 struct pages_or_folios pofs = {
2480 .folios = folios,
2481 .has_folios = true,
2482 .nr_entries = nr_folios,
2483 };
2484
2485 return check_and_migrate_movable_pages_or_folios(&pofs);
2486 }
2487
2488 /*
2489 * Return values and behavior are the same as those for
2490 * check_and_migrate_movable_folios().
2491 */
2492 static long check_and_migrate_movable_pages(unsigned long nr_pages,
2493 struct page **pages)
2494 {
2495 struct pages_or_folios pofs = {
2496 .pages = pages,
2497 .has_folios = false,
2498 .nr_entries = nr_pages,
2499 };
2500
2501 return check_and_migrate_movable_pages_or_folios(&pofs);
2502 }
2503 #else
2504 static long check_and_migrate_movable_pages(unsigned long nr_pages,
2505 struct page **pages)
2506 {
2507 return 0;
2508 }
2509
2510 static long check_and_migrate_movable_folios(unsigned long nr_folios,
2511 struct folio **folios)
2512 {
2513 return 0;
2514 }
2515 #endif /* CONFIG_MIGRATION */
2516
2517 /*
2518 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
2519 * allows us to process the FOLL_LONGTERM flag.
2520 */
2521 static long __gup_longterm_locked(struct mm_struct *mm,
2522 unsigned long start,
2523 unsigned long nr_pages,
2524 struct page **pages,
2525 int *locked,
2526 unsigned int gup_flags)
2527 {
2528 unsigned int flags;
2529 long rc, nr_pinned_pages;
2530
2531 if (!(gup_flags & FOLL_LONGTERM))
2532 return __get_user_pages_locked(mm, start, nr_pages, pages,
2533 locked, gup_flags);
2534
2535 flags = memalloc_pin_save();
2536 do {
2537 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
2538 pages, locked,
2539 gup_flags);
2540 if (nr_pinned_pages <= 0) {
2541 rc = nr_pinned_pages;
2542 break;
2543 }
2544
2545 /* FOLL_LONGTERM implies FOLL_PIN */
2546 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
2547 } while (rc == -EAGAIN);
2548 memalloc_pin_restore(flags);
2549 return rc ? rc : nr_pinned_pages;
2550 }
2551
2552 /*
2553 * Check that the given flags are valid for the exported gup/pup interface, and
2554 * update them with the required flags that the caller must have set.
2555 */
2556 static bool is_valid_gup_args(struct page **pages, int *locked,
2557 unsigned int *gup_flags_p, unsigned int to_set)
2558 {
2559 unsigned int gup_flags = *gup_flags_p;
2560
2561 /*
2562 * These flags are not allowed to be specified externally to the gup
2563 * interfaces:
2564 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
2565 * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote()
2566 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
2567 */
2568 if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
2569 return false;
2570
2571 gup_flags |= to_set;
2572 if (locked) {
2573 /* At the external interface locked must be set */
2574 if (WARN_ON_ONCE(*locked != 1))
2575 return false;
2576
2577 gup_flags |= FOLL_UNLOCKABLE;
2578 }
2579
2580 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2581 if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
2582 (FOLL_PIN | FOLL_GET)))
2583 return false;
2584
2585 /* LONGTERM can only be specified when pinning */
2586 if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
2587 return false;
2588
2589 /* Pages input must be given if using GET/PIN */
2590 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
2591 return false;
2592
2593 /* We want to allow the pgmap to be hot-unplugged at all times */
2594 if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
2595 (gup_flags & FOLL_PCI_P2PDMA)))
2596 return false;
2597
2598 *gup_flags_p = gup_flags;
2599 return true;
2600 }
2601
2602 #ifdef CONFIG_MMU
2603 /**
2604 * get_user_pages_remote() - pin user pages in memory
2605 * @mm: mm_struct of target mm
2606 * @start: starting user address
2607 * @nr_pages: number of pages from start to pin
2608 * @gup_flags: flags modifying lookup behaviour
2609 * @pages: array that receives pointers to the pages pinned.
2610 * Should be at least nr_pages long. Or NULL, if caller
2611 * only intends to ensure the pages are faulted in.
2612 * @locked: pointer to lock flag indicating whether lock is held and
2613 * subsequently whether VM_FAULT_RETRY functionality can be
2614 * utilised. Lock must initially be held.
2615 *
2616 * Returns either number of pages pinned (which may be less than the
2617 * number requested), or an error. Details about the return value:
2618 *
2619 * -- If nr_pages is 0, returns 0.
2620 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2621 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2622 * pages pinned. Again, this may be less than nr_pages.
2623 *
2624 * The caller is responsible for releasing returned @pages, via put_page().
2625 *
2626 * Must be called with mmap_lock held for read or write.
2627 *
2628 * get_user_pages_remote walks a process's page tables and takes a reference
2629 * to each struct page that each user address corresponds to at a given
2630 * instant. That is, it takes the page that would be accessed if a user
2631 * thread accesses the given user virtual address at that instant.
2632 *
2633 * This does not guarantee that the page exists in the user mappings when
2634 * get_user_pages_remote returns, and there may even be a completely different
2635 * page there in some cases (eg. if mmapped pagecache has been invalidated
2636 * and subsequently re-faulted). However it does guarantee that the page
2637 * won't be freed completely. And mostly callers simply care that the page
2638 * contains data that was valid *at some point in time*. Typically, an IO
2639 * or similar operation cannot guarantee anything stronger anyway because
2640 * locks can't be held over the syscall boundary.
2641 *
2642 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2643 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2644 * be called after the page is finished with, and before put_page is called.
2645 *
2646 * get_user_pages_remote is typically used for fewer-copy IO operations,
2647 * to get a handle on the memory by some means other than accesses
2648 * via the user virtual addresses. The pages may be submitted for
2649 * DMA to devices or accessed via their kernel linear mapping (via the
2650 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2651 *
2652 * See also get_user_pages_fast, for performance critical applications.
2653 *
2654 * get_user_pages_remote should be phased out in favor of
2655 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2656 * should use get_user_pages_remote because it cannot pass
2657 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2658 */
2659 long get_user_pages_remote(struct mm_struct *mm,
2660 unsigned long start, unsigned long nr_pages,
2661 unsigned int gup_flags, struct page **pages,
2662 int *locked)
2663 {
2664 int local_locked = 1;
2665
2666 if (!is_valid_gup_args(pages, locked, &gup_flags,
2667 FOLL_TOUCH | FOLL_REMOTE))
2668 return -EINVAL;
2669
2670 return __get_user_pages_locked(mm, start, nr_pages, pages,
2671 locked ? locked : &local_locked,
2672 gup_flags);
2673 }
2674 EXPORT_SYMBOL(get_user_pages_remote);
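
/*
 * Illustrative sketch (editorial addition, hypothetical helper): reading a few
 * bytes of another process's memory in the style of access_remote_vm(). The
 * caller is assumed to hold a reference on @mm (e.g. via get_task_mm()) and
 * @len is assumed not to cross a page boundary.
 */
#if 0
static int example_read_remote(struct mm_struct *mm, unsigned long addr,
			       void *buf, size_t len)
{
	struct page *page;
	int locked = 1;
	void *kaddr;
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages_remote(mm, addr & PAGE_MASK, 1, 0, &page, &locked);
	if (locked)
		mmap_read_unlock(mm);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	kaddr = kmap_local_page(page);
	memcpy(buf, kaddr + offset_in_page(addr), len);
	kunmap_local(kaddr);
	put_page(page);	/* FOLL_GET reference taken on our behalf */
	return 0;
}
#endif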
2675
2676 #else /* CONFIG_MMU */
2677 long get_user_pages_remote(struct mm_struct *mm,
2678 unsigned long start, unsigned long nr_pages,
2679 unsigned int gup_flags, struct page **pages,
2680 int *locked)
2681 {
2682 return 0;
2683 }
2684 #endif /* !CONFIG_MMU */
2685
2686 /**
2687 * get_user_pages() - pin user pages in memory
2688 * @start: starting user address
2689 * @nr_pages: number of pages from start to pin
2690 * @gup_flags: flags modifying lookup behaviour
2691 * @pages: array that receives pointers to the pages pinned.
2692 * Should be at least nr_pages long. Or NULL, if caller
2693 * only intends to ensure the pages are faulted in.
2694 *
2695 * This is the same as get_user_pages_remote(), just with a less-flexible
2696 * calling convention where we assume that the mm being operated on belongs to
2697 * the current task, and doesn't allow passing of a locked parameter. We also
2698 * obviously don't pass FOLL_REMOTE in here.
2699 */
2700 long get_user_pages(unsigned long start, unsigned long nr_pages,
2701 unsigned int gup_flags, struct page **pages)
2702 {
2703 int locked = 1;
2704
2705 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
2706 return -EINVAL;
2707
2708 return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2709 &locked, gup_flags);
2710 }
2711 EXPORT_SYMBOL(get_user_pages);
2712
2713 /*
2714 * get_user_pages_unlocked() is suitable to replace the form:
2715 *
2716 * mmap_read_lock(mm);
2717 * get_user_pages(mm, ..., pages, NULL);
2718 * mmap_read_unlock(mm);
2719 *
2720 * with:
2721 *
2722 * get_user_pages_unlocked(mm, ..., pages);
2723 *
2724 * It is functionally equivalent to get_user_pages_fast so
2725 * get_user_pages_fast should be used instead if specific gup_flags
2726 * (e.g. FOLL_FORCE) are not required.
2727 */
2728 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2729 struct page **pages, unsigned int gup_flags)
2730 {
2731 int locked = 0;
2732
2733 if (!is_valid_gup_args(pages, NULL, &gup_flags,
2734 FOLL_TOUCH | FOLL_UNLOCKABLE))
2735 return -EINVAL;
2736
2737 return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2738 &locked, gup_flags);
2739 }
2740 EXPORT_SYMBOL(get_user_pages_unlocked);
2741
2742 /*
2743 * GUP-fast
2744 *
2745 * get_user_pages_fast attempts to pin user pages by walking the page
2746 * tables directly and avoids taking locks. Thus the walker needs to be
2747 * protected from page table pages being freed from under it, and should
2748 * block any THP splits.
2749 *
2750 * One way to achieve this is to have the walker disable interrupts, and
2751 * rely on IPIs from the TLB flushing code blocking before the page table
2752 * pages are freed. This is unsuitable for architectures that do not need
2753 * to broadcast an IPI when invalidating TLBs.
2754 *
2755 * Another way to achieve this is to batch up page table containing pages
2756 * belonging to more than one mm_user, then rcu_sched a callback to free those
2757 * pages. Disabling interrupts will allow the gup_fast() walker to both block
2758 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2759 * (which is a relatively rare event). The code below adopts this strategy.
2760 *
2761 * Before activating this code, please be aware that the following assumptions
2762 * are currently made:
2763 *
2764 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2765 * free pages containing page tables or TLB flushing requires IPI broadcast.
2766 *
2767 * *) ptes can be read atomically by the architecture.
2768 *
2769 * *) access_ok is sufficient to validate userspace address ranges.
2770 *
2771 * The last two assumptions can be relaxed by the addition of helper functions.
2772 *
2773 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2774 */
2775 #ifdef CONFIG_HAVE_GUP_FAST
2776 /*
2777 * Used in the GUP-fast path to determine whether GUP is permitted to work on
2778 * a specific folio.
2779 *
2780 * This call assumes the caller has pinned the folio, that the lowest page table
2781 * level still points to this folio, and that interrupts have been disabled.
2782 *
2783 * GUP-fast must reject all secretmem folios.
2784 *
2785 * Writing to pinned file-backed dirty tracked folios is inherently problematic
2786 * (see comment describing the writable_file_mapping_allowed() function). We
2787 * therefore try to avoid the most egregious case of a long-term mapping doing
2788 * so.
2789 *
2790 * This function cannot be as thorough as that one as the VMA is not available
2791 * in the fast path, so instead we whitelist known good cases and if in doubt,
2792 * fall back to the slow path.
2793 */
2794 static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
2795 {
2796 bool reject_file_backed = false;
2797 struct address_space *mapping;
2798 bool check_secretmem = false;
2799 unsigned long mapping_flags;
2800
2801 /*
2802 * If we aren't pinning then no problematic write can occur. A long term
2803 * pin is the most egregious case so this is the one we disallow.
2804 */
2805 if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
2806 (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
2807 reject_file_backed = true;
2808
2809 /* We hold a folio reference, so we can safely access folio fields. */
2810
2811 /* secretmem folios are always order-0 folios. */
2812 if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio))
2813 check_secretmem = true;
2814
2815 if (!reject_file_backed && !check_secretmem)
2816 return true;
2817
2818 if (WARN_ON_ONCE(folio_test_slab(folio)))
2819 return false;
2820
2821 /* hugetlb neither requires dirty-tracking nor can be secretmem. */
2822 if (folio_test_hugetlb(folio))
2823 return true;
2824
2825 /*
2826 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods
2827 * cannot proceed, which means no actions performed under RCU can
2828 * proceed either.
2829 *
2830 * inodes and thus their mappings are freed under RCU, which means the
2831 * mapping cannot be freed beneath us and thus we can safely dereference
2832 * it.
2833 */
2834 lockdep_assert_irqs_disabled();
2835
2836 /*
2837 * However, there may be operations which _alter_ the mapping, so ensure
2838 * we read it once and only once.
2839 */
2840 mapping = READ_ONCE(folio->mapping);
2841
2842 /*
2843 * The mapping may have been truncated, in any case we cannot determine
2844 * if this mapping is safe - fall back to slow path to determine how to
2845 * proceed.
2846 */
2847 if (!mapping)
2848 return false;
2849
2850 /* Anonymous folios pose no problem. */
2851 mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
2852 if (mapping_flags)
2853 return mapping_flags & PAGE_MAPPING_ANON;
2854
2855 /*
2856 * At this point, we know the mapping is non-null and points to an
2857 * address_space object.
2858 */
2859 if (check_secretmem && secretmem_mapping(mapping))
2860 return false;
2861 /* The only remaining allowed file system is shmem. */
2862 return !reject_file_backed || shmem_mapping(mapping);
2863 }
2864
2865 static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start,
2866 unsigned int flags, struct page **pages)
2867 {
2868 while ((*nr) - nr_start) {
2869 struct folio *folio = page_folio(pages[--(*nr)]);
2870
2871 folio_clear_referenced(folio);
2872 gup_put_folio(folio, 1, flags);
2873 }
2874 }
2875
2876 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2877 /*
2878 * GUP-fast relies on pte change detection to avoid concurrent pgtable
2879 * operations.
2880 *
2881 * To pin the page, GUP-fast needs to do below in order:
2882 * (1) pin the page (by prefetching pte), then (2) check pte not changed.
2883 *
2884 * For the rest of pgtable operations where pgtable updates can be racy
2885 * with GUP-fast, we need to do (1) clear pte, then (2) check whether page
2886 * is pinned.
2887 *
2888 * Above will work for all pte-level operations, including THP split.
2889 *
2890 * For THP collapse, it's a bit more complicated because GUP-fast may be
2891 * walking a pgtable page that is being freed (pte is still valid but pmd
2892 * can be cleared already). To avoid race in such condition, we need to
2893 * also check pmd here to make sure pmd doesn't change (corresponds to
2894 * pmdp_collapse_flush() in the THP collapse code path).
2895 */
2896 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2897 unsigned long end, unsigned int flags, struct page **pages,
2898 int *nr)
2899 {
2900 struct dev_pagemap *pgmap = NULL;
2901 int nr_start = *nr, ret = 0;
2902 pte_t *ptep, *ptem;
2903
2904 ptem = ptep = pte_offset_map(&pmd, addr);
2905 if (!ptep)
2906 return 0;
2907 do {
2908 pte_t pte = ptep_get_lockless(ptep);
2909 struct page *page;
2910 struct folio *folio;
2911
2912 /*
2913 * Always fallback to ordinary GUP on PROT_NONE-mapped pages:
2914 * pte_access_permitted() better should reject these pages
2915 * either way: otherwise, GUP-fast might succeed in
2916 * cases where ordinary GUP would fail due to VMA access
2917 * permissions.
2918 */
2919 if (pte_protnone(pte))
2920 goto pte_unmap;
2921
2922 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2923 goto pte_unmap;
2924
2925 if (pte_devmap(pte)) {
2926 if (unlikely(flags & FOLL_LONGTERM))
2927 goto pte_unmap;
2928
2929 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2930 if (unlikely(!pgmap)) {
2931 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
2932 goto pte_unmap;
2933 }
2934 } else if (pte_special(pte))
2935 goto pte_unmap;
2936
2937 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2938 page = pte_page(pte);
2939
2940 folio = try_grab_folio_fast(page, 1, flags);
2941 if (!folio)
2942 goto pte_unmap;
2943
2944 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2945 unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
2946 gup_put_folio(folio, 1, flags);
2947 goto pte_unmap;
2948 }
2949
2950 if (!gup_fast_folio_allowed(folio, flags)) {
2951 gup_put_folio(folio, 1, flags);
2952 goto pte_unmap;
2953 }
2954
2955 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
2956 gup_put_folio(folio, 1, flags);
2957 goto pte_unmap;
2958 }
2959
2960 /*
2961 * We need to make the page accessible if and only if we are
2962 * going to access its content (the FOLL_PIN case). Please
2963 * see Documentation/core-api/pin_user_pages.rst for
2964 * details.
2965 */
2966 if (flags & FOLL_PIN) {
2967 ret = arch_make_folio_accessible(folio);
2968 if (ret) {
2969 gup_put_folio(folio, 1, flags);
2970 goto pte_unmap;
2971 }
2972 }
2973 folio_set_referenced(folio);
2974 pages[*nr] = page;
2975 (*nr)++;
2976 } while (ptep++, addr += PAGE_SIZE, addr != end);
2977
2978 ret = 1;
2979
2980 pte_unmap:
2981 if (pgmap)
2982 put_dev_pagemap(pgmap);
2983 pte_unmap(ptem);
2984 return ret;
2985 }
2986 #else
2987
2988 /*
2989 * If we can't determine whether or not a pte is special, then fail immediately
2990 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2991 * to be special.
2992 *
2993 * For a futex to be placed on a THP tail page, get_futex_key requires a
2994 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2995 * useful to have gup_fast_pmd_leaf even if we can't operate on ptes.
2996 */
2997 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2998 unsigned long end, unsigned int flags, struct page **pages,
2999 int *nr)
3000 {
3001 return 0;
3002 }
3003 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
3004
3005 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
3006 static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
3007 unsigned long end, unsigned int flags, struct page **pages, int *nr)
3008 {
3009 int nr_start = *nr;
3010 struct dev_pagemap *pgmap = NULL;
3011
3012 do {
3013 struct folio *folio;
3014 struct page *page = pfn_to_page(pfn);
3015
3016 pgmap = get_dev_pagemap(pfn, pgmap);
3017 if (unlikely(!pgmap)) {
3018 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3019 break;
3020 }
3021
3022 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
3023 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3024 break;
3025 }
3026
3027 folio = try_grab_folio_fast(page, 1, flags);
3028 if (!folio) {
3029 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3030 break;
3031 }
3032 folio_set_referenced(folio);
3033 pages[*nr] = page;
3034 (*nr)++;
3035 pfn++;
3036 } while (addr += PAGE_SIZE, addr != end);
3037
3038 put_dev_pagemap(pgmap);
3039 return addr == end;
3040 }
3041
3042 static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3043 unsigned long end, unsigned int flags, struct page **pages,
3044 int *nr)
3045 {
3046 unsigned long fault_pfn;
3047 int nr_start = *nr;
3048
3049 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
3050 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
3051 return 0;
3052
3053 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3054 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3055 return 0;
3056 }
3057 return 1;
3058 }
3059
3060 static int gup_fast_devmap_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
3061 unsigned long end, unsigned int flags, struct page **pages,
3062 int *nr)
3063 {
3064 unsigned long fault_pfn;
3065 int nr_start = *nr;
3066
3067 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
3068 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
3069 return 0;
3070
3071 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3072 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3073 return 0;
3074 }
3075 return 1;
3076 }
3077 #else
3078 static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3079 unsigned long end, unsigned int flags, struct page **pages,
3080 int *nr)
3081 {
3082 BUILD_BUG();
3083 return 0;
3084 }
3085
3086 static int gup_fast_devmap_pud_leaf(pud_t pud, pud_t *pudp, unsigned long addr,
3087 unsigned long end, unsigned int flags, struct page **pages,
3088 int *nr)
3089 {
3090 BUILD_BUG();
3091 return 0;
3092 }
3093 #endif
3094
3095 static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3096 unsigned long end, unsigned int flags, struct page **pages,
3097 int *nr)
3098 {
3099 struct page *page;
3100 struct folio *folio;
3101 int refs;
3102
3103 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
3104 return 0;
3105
3106 if (pmd_special(orig))
3107 return 0;
3108
3109 if (pmd_devmap(orig)) {
3110 if (unlikely(flags & FOLL_LONGTERM))
3111 return 0;
3112 return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags,
3113 pages, nr);
3114 }
3115
3116 page = pmd_page(orig);
3117 refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);
3118
3119 folio = try_grab_folio_fast(page, refs, flags);
3120 if (!folio)
3121 return 0;
3122
3123 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3124 gup_put_folio(folio, refs, flags);
3125 return 0;
3126 }
3127
3128 if (!gup_fast_folio_allowed(folio, flags)) {
3129 gup_put_folio(folio, refs, flags);
3130 return 0;
3131 }
3132 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3133 gup_put_folio(folio, refs, flags);
3134 return 0;
3135 }
3136
3137 *nr += refs;
3138 folio_set_referenced(folio);
3139 return 1;
3140 }
3141
3142 static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
3143 unsigned long end, unsigned int flags, struct page **pages,
3144 int *nr)
3145 {
3146 struct page *page;
3147 struct folio *folio;
3148 int refs;
3149
3150 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
3151 return 0;
3152
3153 if (pud_special(orig))
3154 return 0;
3155
3156 if (pud_devmap(orig)) {
3157 if (unlikely(flags & FOLL_LONGTERM))
3158 return 0;
3159 return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags,
3160 pages, nr);
3161 }
3162
3163 page = pud_page(orig);
3164 refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);
3165
3166 folio = try_grab_folio_fast(page, refs, flags);
3167 if (!folio)
3168 return 0;
3169
3170 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3171 gup_put_folio(folio, refs, flags);
3172 return 0;
3173 }
3174
3175 if (!gup_fast_folio_allowed(folio, flags)) {
3176 gup_put_folio(folio, refs, flags);
3177 return 0;
3178 }
3179
3180 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3181 gup_put_folio(folio, refs, flags);
3182 return 0;
3183 }
3184
3185 *nr += refs;
3186 folio_set_referenced(folio);
3187 return 1;
3188 }
3189
3190 static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
3191 unsigned long end, unsigned int flags, struct page **pages,
3192 int *nr)
3193 {
3194 int refs;
3195 struct page *page;
3196 struct folio *folio;
3197
3198 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
3199 return 0;
3200
3201 BUILD_BUG_ON(pgd_devmap(orig));
3202
3203 page = pgd_page(orig);
3204 refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
3205
3206 folio = try_grab_folio_fast(page, refs, flags);
3207 if (!folio)
3208 return 0;
3209
3210 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
3211 gup_put_folio(folio, refs, flags);
3212 return 0;
3213 }
3214
3215 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3216 gup_put_folio(folio, refs, flags);
3217 return 0;
3218 }
3219
3220 if (!gup_fast_folio_allowed(folio, flags)) {
3221 gup_put_folio(folio, refs, flags);
3222 return 0;
3223 }
3224
3225 *nr += refs;
3226 folio_set_referenced(folio);
3227 return 1;
3228 }
3229
3230 static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
3231 unsigned long end, unsigned int flags, struct page **pages,
3232 int *nr)
3233 {
3234 unsigned long next;
3235 pmd_t *pmdp;
3236
3237 pmdp = pmd_offset_lockless(pudp, pud, addr);
3238 do {
3239 pmd_t pmd = pmdp_get_lockless(pmdp);
3240
3241 next = pmd_addr_end(addr, end);
3242 if (!pmd_present(pmd))
3243 return 0;
3244
3245 if (unlikely(pmd_leaf(pmd))) {
3246 /* See gup_fast_pte_range() */
3247 if (pmd_protnone(pmd))
3248 return 0;
3249
3250 if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags,
3251 pages, nr))
3252 return 0;
3253
3254 } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
3255 pages, nr))
3256 return 0;
3257 } while (pmdp++, addr = next, addr != end);
3258
3259 return 1;
3260 }
3261
3262 static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
3263 unsigned long end, unsigned int flags, struct page **pages,
3264 int *nr)
3265 {
3266 unsigned long next;
3267 pud_t *pudp;
3268
3269 pudp = pud_offset_lockless(p4dp, p4d, addr);
3270 do {
3271 pud_t pud = READ_ONCE(*pudp);
3272
3273 next = pud_addr_end(addr, end);
3274 if (unlikely(!pud_present(pud)))
3275 return 0;
3276 if (unlikely(pud_leaf(pud))) {
3277 if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags,
3278 pages, nr))
3279 return 0;
3280 } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
3281 pages, nr))
3282 return 0;
3283 } while (pudp++, addr = next, addr != end);
3284
3285 return 1;
3286 }
3287
3288 static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
3289 unsigned long end, unsigned int flags, struct page **pages,
3290 int *nr)
3291 {
3292 unsigned long next;
3293 p4d_t *p4dp;
3294
3295 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
3296 do {
3297 p4d_t p4d = READ_ONCE(*p4dp);
3298
3299 next = p4d_addr_end(addr, end);
3300 if (!p4d_present(p4d))
3301 return 0;
3302 BUILD_BUG_ON(p4d_leaf(p4d));
3303 if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
3304 pages, nr))
3305 return 0;
3306 } while (p4dp++, addr = next, addr != end);
3307
3308 return 1;
3309 }
3310
3311 static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
3312 unsigned int flags, struct page **pages, int *nr)
3313 {
3314 unsigned long next;
3315 pgd_t *pgdp;
3316
3317 pgdp = pgd_offset(current->mm, addr);
3318 do {
3319 pgd_t pgd = READ_ONCE(*pgdp);
3320
3321 next = pgd_addr_end(addr, end);
3322 if (pgd_none(pgd))
3323 return;
3324 if (unlikely(pgd_leaf(pgd))) {
3325 if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags,
3326 pages, nr))
3327 return;
3328 } else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
3329 pages, nr))
3330 return;
3331 } while (pgdp++, addr = next, addr != end);
3332 }
3333 #else
3334 static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end,
3335 unsigned int flags, struct page **pages, int *nr)
3336 {
3337 }
3338 #endif /* CONFIG_HAVE_GUP_FAST */
3339
3340 #ifndef gup_fast_permitted
3341 /*
3342 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
3343 * we need to fall back to the slow version:
3344 */
3345 static bool gup_fast_permitted(unsigned long start, unsigned long end)
3346 {
3347 return true;
3348 }
3349 #endif
3350
3351 static unsigned long gup_fast(unsigned long start, unsigned long end,
3352 unsigned int gup_flags, struct page **pages)
3353 {
3354 unsigned long flags;
3355 int nr_pinned = 0;
3356 unsigned seq;
3357
3358 if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
3359 !gup_fast_permitted(start, end))
3360 return 0;
3361
3362 if (gup_flags & FOLL_PIN) {
3363 seq = raw_read_seqcount(&current->mm->write_protect_seq);
3364 if (seq & 1)
3365 return 0;
3366 }
3367
3368 /*
3369 * Disable interrupts. The nested form is used, in order to allow full,
3370 * general purpose use of this routine.
3371 *
3372 * With interrupts disabled, we block page table pages from being freed
3373 * from under us. See struct mmu_table_batch comments in
3374 * include/asm-generic/tlb.h for more details.
3375 *
3376 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
3377 * that come from THPs splitting.
3378 */
3379 local_irq_save(flags);
3380 gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned);
3381 local_irq_restore(flags);
3382
3383 /*
3384 * When pinning pages for DMA there could be a concurrent write protect
3385 * from fork() via copy_page_range(); in this case always fail GUP-fast.
3386 */
3387 if (gup_flags & FOLL_PIN) {
3388 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
3389 gup_fast_unpin_user_pages(pages, nr_pinned);
3390 return 0;
3391 } else {
3392 sanity_check_pinned_pages(pages, nr_pinned);
3393 }
3394 }
3395 return nr_pinned;
3396 }
3397
3398 static int gup_fast_fallback(unsigned long start, unsigned long nr_pages,
3399 unsigned int gup_flags, struct page **pages)
3400 {
3401 unsigned long len, end;
3402 unsigned long nr_pinned;
3403 int locked = 0;
3404 int ret;
3405
3406 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
3407 FOLL_FORCE | FOLL_PIN | FOLL_GET |
3408 FOLL_FAST_ONLY | FOLL_NOFAULT |
3409 FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
3410 return -EINVAL;
3411
3412 if (gup_flags & FOLL_PIN)
3413 mm_set_has_pinned_flag(&current->mm->flags);
3414
3415 if (!(gup_flags & FOLL_FAST_ONLY))
3416 might_lock_read(&current->mm->mmap_lock);
3417
3418 start = untagged_addr(start) & PAGE_MASK;
3419 len = nr_pages << PAGE_SHIFT;
3420 if (check_add_overflow(start, len, &end))
3421 return -EOVERFLOW;
3422 if (end > TASK_SIZE_MAX)
3423 return -EFAULT;
3424 if (unlikely(!access_ok((void __user *)start, len)))
3425 return -EFAULT;
3426
3427 nr_pinned = gup_fast(start, end, gup_flags, pages);
3428 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
3429 return nr_pinned;
3430
3431 /* Slow path: try to get the remaining pages with get_user_pages */
3432 start += nr_pinned << PAGE_SHIFT;
3433 pages += nr_pinned;
3434 ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
3435 pages, &locked,
3436 gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
3437 if (ret < 0) {
3438 /*
3439 * The caller has to unpin the pages we already pinned, so
3440 * returning -errno is not an option.
3441 */
3442 if (nr_pinned)
3443 return nr_pinned;
3444 return ret;
3445 }
3446 return ret + nr_pinned;
3447 }
3448
3449 /**
3450 * get_user_pages_fast_only() - pin user pages in memory
3451 * @start: starting user address
3452 * @nr_pages: number of pages from start to pin
3453 * @gup_flags: flags modifying pin behaviour
3454 * @pages: array that receives pointers to the pages pinned.
3455 * Should be at least nr_pages long.
3456 *
3457 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
3458 * the regular GUP.
3459 *
3460 * If the architecture does not support this function, simply return with no
3461 * pages pinned.
3462 *
3463 * Careful, careful! COW breaking can go either way, so a non-write
3464 * access can get ambiguous page results. If you call this function without
3465 * 'write' set, you'd better be sure that you're ok with that ambiguity.
3466 */
3467 int get_user_pages_fast_only(unsigned long start, int nr_pages,
3468 unsigned int gup_flags, struct page **pages)
3469 {
3470 /*
3471 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
3472 * because gup fast is always a "pin with a +1 page refcount" request.
3473 *
3474 * FOLL_FAST_ONLY is required in order to match the API description of
3475 * this routine: no fall back to regular ("slow") GUP.
3476 */
3477 if (!is_valid_gup_args(pages, NULL, &gup_flags,
3478 FOLL_GET | FOLL_FAST_ONLY))
3479 return -EINVAL;
3480
3481 return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3482 }
3483 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
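
/*
 * Illustrative usage sketch (not part of this file): a caller in a context
 * where falling back to the slow path is not acceptable can treat a short
 * return as failure. "user_addr" and the single-page request below are
 * hypothetical.
 *
 *	struct page *page;
 *	int got;
 *
 *	got = get_user_pages_fast_only(user_addr, 1, FOLL_WRITE, &page);
 *	if (got != 1)
 *		return -EFAULT;
 *	... access the page; no slow-path fallback was attempted ...
 *	put_page(page);
 */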
3484
3485 /**
3486 * get_user_pages_fast() - pin user pages in memory
3487 * @start: starting user address
3488 * @nr_pages: number of pages from start to pin
3489 * @gup_flags: flags modifying pin behaviour
3490 * @pages: array that receives pointers to the pages pinned.
3491 * Should be at least nr_pages long.
3492 *
3493 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3494 * If not successful, it will fall back to taking the lock and
3495 * calling get_user_pages().
3496 *
3497 * Returns number of pages pinned. This may be fewer than the number requested.
3498 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3499 * -errno.
3500 */
3501 int get_user_pages_fast(unsigned long start, int nr_pages,
3502 unsigned int gup_flags, struct page **pages)
3503 {
3504 /*
3505 * The caller may or may not have explicitly set FOLL_GET; either way is
3506 * OK. However, internally (within mm/gup.c), gup fast variants must set
3507 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3508 * request.
3509 */
3510 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
3511 return -EINVAL;
3512 return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3513 }
3514 EXPORT_SYMBOL_GPL(get_user_pages_fast);
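
/*
 * Illustrative usage sketch (not part of this file), with hypothetical
 * caller-provided "start", "nr_pages" and "pages" values. References taken
 * here via the implied FOLL_GET are dropped with put_page(); the
 * pin_user_pages*() variants below must instead be released with
 * unpin_user_page*().
 *
 *	int i, got;
 *
 *	got = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
 *	if (got <= 0)
 *		return got;
 *	... read/write the pages ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */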
3515
3516 /**
3517 * pin_user_pages_fast() - pin user pages in memory without taking locks
3518 *
3519 * @start: starting user address
3520 * @nr_pages: number of pages from start to pin
3521 * @gup_flags: flags modifying pin behaviour
3522 * @pages: array that receives pointers to the pages pinned.
3523 * Should be at least nr_pages long.
3524 *
3525 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3526 * get_user_pages_fast() for documentation on the function arguments, because
3527 * the arguments here are identical.
3528 *
3529 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3530 * see Documentation/core-api/pin_user_pages.rst for further details.
3531 *
3532 * Note that if a zero_page is amongst the returned pages, it will not have
3533 * pins in it and unpin_user_page() will not remove pins from it.
3534 */
3535 int pin_user_pages_fast(unsigned long start, int nr_pages,
3536 unsigned int gup_flags, struct page **pages)
3537 {
3538 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3539 return -EINVAL;
3540 return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3541 }
3542 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
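
/*
 * Illustrative usage sketch (not part of this file): a DMA setup path might
 * pin a user buffer and later release it with the matching unpin primitive
 * (or a dirty-tracking unpin variant, if the pages were written to).
 * "start", "npages" and "pages" are hypothetical caller-provided values.
 *
 *	int pinned;
 *
 *	pinned = pin_user_pages_fast(start, npages,
 *				     FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	... program the device to DMA into the pinned pages ...
 *	unpin_user_pages(pages, pinned);
 */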
3543
3544 /**
3545 * pin_user_pages_remote() - pin pages of a remote process
3546 *
3547 * @mm: mm_struct of target mm
3548 * @start: starting user address
3549 * @nr_pages: number of pages from start to pin
3550 * @gup_flags: flags modifying lookup behaviour
3551 * @pages: array that receives pointers to the pages pinned.
3552 * Should be at least nr_pages long.
3553 * @locked: pointer to lock flag indicating whether lock is held and
3554 * subsequently whether VM_FAULT_RETRY functionality can be
3555 * utilised. Lock must initially be held.
3556 *
3557 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3558 * get_user_pages_remote() for documentation on the function arguments, because
3559 * the arguments here are identical.
3560 *
3561 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3562 * see Documentation/core-api/pin_user_pages.rst for details.
3563 *
3564 * Note that if a zero_page is amongst the returned pages, it will not have
3565 * pins in it and unpin_user_page*() will not remove pins from it.
3566 */
3567 long pin_user_pages_remote(struct mm_struct *mm,
3568 unsigned long start, unsigned long nr_pages,
3569 unsigned int gup_flags, struct page **pages,
3570 int *locked)
3571 {
3572 int local_locked = 1;
3573
3574 if (!is_valid_gup_args(pages, locked, &gup_flags,
3575 FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
3576 return 0;
3577 return __gup_longterm_locked(mm, start, nr_pages, pages,
3578 locked ? locked : &local_locked,
3579 gup_flags);
3580 }
3581 EXPORT_SYMBOL(pin_user_pages_remote);
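
/*
 * Illustrative usage sketch (not part of this file): pinning pages of another
 * process from a caller that holds a reference on the target mm. mmap_lock
 * must be held on entry; @locked reports whether it is still held on return.
 * "mm", "addr", "nr_pages" and "pages" are hypothetical.
 *
 *	int locked = 1;
 *	long pinned;
 *
 *	mmap_read_lock(mm);
 *	pinned = pin_user_pages_remote(mm, addr, nr_pages, FOLL_WRITE,
 *				       pages, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *	if (pinned <= 0)
 *		return pinned ? pinned : -EFAULT;
 *	... access the pages ...
 *	unpin_user_pages(pages, pinned);
 */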
3582
3583 /**
3584 * pin_user_pages() - pin user pages in memory for use by other devices
3585 *
3586 * @start: starting user address
3587 * @nr_pages: number of pages from start to pin
3588 * @gup_flags: flags modifying lookup behaviour
3589 * @pages: array that receives pointers to the pages pinned.
3590 * Should be at least nr_pages long.
3591 *
3592 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3593 * FOLL_PIN is set.
3594 *
3595 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3596 * see Documentation/core-api/pin_user_pages.rst for details.
3597 *
3598 * Note that if a zero_page is amongst the returned pages, it will not have
3599 * pins in it and unpin_user_page*() will not remove pins from it.
3600 */
3601 long pin_user_pages(unsigned long start, unsigned long nr_pages,
3602 unsigned int gup_flags, struct page **pages)
3603 {
3604 int locked = 1;
3605
3606 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3607 return 0;
3608 return __gup_longterm_locked(current->mm, start, nr_pages,
3609 pages, &locked, gup_flags);
3610 }
3611 EXPORT_SYMBOL(pin_user_pages);
3612
3613 /*
3614 * pin_user_pages_unlocked() is the FOLL_PIN variant of
3615 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3616 * FOLL_PIN and rejects FOLL_GET.
3617 *
3618 * Note that if a zero_page is amongst the returned pages, it will not have
3619 * pins in it and unpin_user_page*() will not remove pins from it.
3620 */
3621 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3622 struct page **pages, unsigned int gup_flags)
3623 {
3624 int locked = 0;
3625
3626 if (!is_valid_gup_args(pages, NULL, &gup_flags,
3627 FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
3628 return 0;
3629
3630 return __gup_longterm_locked(current->mm, start, nr_pages, pages,
3631 &locked, gup_flags);
3632 }
3633 EXPORT_SYMBOL(pin_user_pages_unlocked);
3634
3635 /**
3636 * memfd_pin_folios() - pin folios associated with a memfd
3637 * @memfd: the memfd whose folios are to be pinned
3638 * @start: the first memfd offset
3639 * @end: the last memfd offset (inclusive)
3640 * @folios: array that receives pointers to the folios pinned
3641 * @max_folios: maximum number of entries in @folios
3642 * @offset: the offset into the first folio
3643 *
3644 * Attempt to pin folios associated with a memfd in the contiguous range
3645 * [start, end]. Given that a memfd is backed by either shmem or hugetlb,
3646 * the folios are either found in the page cache or allocated if necessary.
3647 * Once the folios are located, they are all pinned via
3648 * FOLL_PIN and @offset is populated with the offset into the first folio.
3649 * Eventually, the pinned folios must be released using either
3650 * unpin_folios() or unpin_folio().
3651 *
3652 * Note that the folios may be pinned for an indefinite amount of time;
3653 * in most cases, how long they stay pinned is controlled by userspace.
3654 * This behavior is effectively the same as using FOLL_LONGTERM with
3655 * other GUP APIs.
3656 *
3657 * Returns number of folios pinned, which could be less than @max_folios
3658 * as it depends on the folio sizes that cover the range [start, end].
3659 * If no folios were pinned, it returns -errno.
3660 */
3661 long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
3662 struct folio **folios, unsigned int max_folios,
3663 pgoff_t *offset)
3664 {
3665 unsigned int flags, nr_folios, nr_found;
3666 unsigned int i, pgshift = PAGE_SHIFT;
3667 pgoff_t start_idx, end_idx, next_idx;
3668 struct folio *folio = NULL;
3669 struct folio_batch fbatch;
3670 struct hstate *h;
3671 long ret = -EINVAL;
3672
3673 if (start < 0 || start > end || !max_folios)
3674 return -EINVAL;
3675
3676 if (!memfd)
3677 return -EINVAL;
3678
3679 if (!shmem_file(memfd) && !is_file_hugepages(memfd))
3680 return -EINVAL;
3681
3682 if (end >= i_size_read(file_inode(memfd)))
3683 return -EINVAL;
3684
3685 if (is_file_hugepages(memfd)) {
3686 h = hstate_file(memfd);
3687 pgshift = huge_page_shift(h);
3688 }
3689
3690 flags = memalloc_pin_save();
3691 do {
3692 nr_folios = 0;
3693 start_idx = start >> pgshift;
3694 end_idx = end >> pgshift;
3695 if (is_file_hugepages(memfd)) {
3696 start_idx <<= huge_page_order(h);
3697 end_idx <<= huge_page_order(h);
3698 }
3699
3700 folio_batch_init(&fbatch);
3701 while (start_idx <= end_idx && nr_folios < max_folios) {
3702 /*
3703 * In most cases, we should be able to find the folios
3704 * in the page cache. If we cannot find them for some
3705 * reason, we try to allocate them and add them to the
3706 * page cache.
3707 */
3708 nr_found = filemap_get_folios_contig(memfd->f_mapping,
3709 &start_idx,
3710 end_idx,
3711 &fbatch);
3712 if (folio) {
3713 folio_put(folio);
3714 folio = NULL;
3715 }
3716
3717 next_idx = 0;
3718 for (i = 0; i < nr_found; i++) {
3719 /*
3720 * As there can be multiple entries for a
3721 * given folio in the batch returned by
3722 * filemap_get_folios_contig(), the check
3723 * below ensures that we pin and return a
3724 * unique set of folios between start and end.
3725 */
3726 if (next_idx &&
3727 next_idx != folio_index(fbatch.folios[i]))
3728 continue;
3729
3730 folio = page_folio(&fbatch.folios[i]->page);
3731
3732 if (try_grab_folio(folio, 1, FOLL_PIN)) {
3733 folio_batch_release(&fbatch);
3734 ret = -EINVAL;
3735 goto err;
3736 }
3737
3738 if (nr_folios == 0)
3739 *offset = offset_in_folio(folio, start);
3740
3741 folios[nr_folios] = folio;
3742 next_idx = folio_next_index(folio);
3743 if (++nr_folios == max_folios)
3744 break;
3745 }
3746
3747 folio = NULL;
3748 folio_batch_release(&fbatch);
3749 if (!nr_found) {
3750 folio = memfd_alloc_folio(memfd, start_idx);
3751 if (IS_ERR(folio)) {
3752 ret = PTR_ERR(folio);
3753 if (ret != -EEXIST)
3754 goto err;
3755 folio = NULL;
3756 }
3757 }
3758 }
3759
3760 ret = check_and_migrate_movable_folios(nr_folios, folios);
3761 } while (ret == -EAGAIN);
3762
3763 memalloc_pin_restore(flags);
3764 return ret ? ret : nr_folios;
3765 err:
3766 memalloc_pin_restore(flags);
3767 unpin_folios(folios, nr_folios);
3768
3769 return ret;
3770 }
3771 EXPORT_SYMBOL_GPL(memfd_pin_folios);
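
/*
 * Illustrative usage sketch (not part of this file): pinning the folios that
 * back a memfd range and releasing them later. "memfd", "first", "last" and
 * MAX_FOLIOS are hypothetical caller-provided values.
 *
 *	struct folio *folios[MAX_FOLIOS];
 *	pgoff_t start_offset;
 *	long nr;
 *
 *	nr = memfd_pin_folios(memfd, first, last, folios, MAX_FOLIOS,
 *			      &start_offset);
 *	if (nr < 0)
 *		return nr;
 *	... map or DMA from the folios, beginning at start_offset ...
 *	unpin_folios(folios, nr);
 */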
3772
3773 /**
3774 * folio_add_pins() - add pins to an already-pinned folio
3775 * @folio: the folio to add more pins to
3776 * @pins: number of pins to add
3777 *
3778 * Try to add more pins to an already-pinned folio. The semantics
3779 * of the pin (e.g., FOLL_WRITE) follow any existing pin and cannot
3780 * be changed.
3781 *
3782 * This function is helpful when having obtained a pin on a large folio
3783 * using memfd_pin_folios(), but wanting to logically unpin parts
3784 * (e.g., individual pages) of the folio later, for example, using
3785 * unpin_user_page_range_dirty_lock().
3786 *
3787 * This is not the right interface to initially pin a folio.
3788 */
3789 int folio_add_pins(struct folio *folio, unsigned int pins)
3790 {
3791 VM_WARN_ON_ONCE(!folio_maybe_dma_pinned(folio));
3792
3793 return try_grab_folio(folio, pins, FOLL_PIN);
3794 }
3795 EXPORT_SYMBOL_GPL(folio_add_pins);
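
/*
 * Illustrative usage sketch (not part of this file): after memfd_pin_folios()
 * returned a large folio holding a single pin, a caller that later wants to
 * unpin page-sized chunks individually can first take the additional pins it
 * needs. "folio", "nr_extra" and "ret" are hypothetical.
 *
 *	ret = folio_add_pins(folio, nr_extra);
 *	if (ret)
 *		return ret;
 *	... later, drop each pin individually, e.g. via
 *	    unpin_user_page_range_dirty_lock(), or all at once via
 *	    unpin_folios()/unpin_folio() ...
 */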
3796