// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memfd.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/*
	 * We only pin anonymous pages if they are exclusive. Once pinned, we
	 * can no longer turn them possibly shared and PageAnonExclusive() will
	 * stick around until the page is freed.
	 *
	 * We'd like to verify that our pinned anonymous pages are still mapped
	 * exclusively. The issue with anon THP is that we don't know how
	 * they are/were mapped when pinning them. However, for anon
	 * THP we can assume that either the given page (PTE-mapped THP) or
	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
	 * neither is the case, there is certainly something wrong.
	 */
	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio;

		if (!page)
			continue;

		folio = page_folio(page);

		if (is_zero_page(page) ||
		    !folio_test_anon(folio))
			continue;
		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
		else
			/* Either a PTE-mapped or a PMD-mapped THP. */
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
				       !PageAnonExclusive(page), page);
	}
}

/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
	struct folio *folio;

retry:
	folio = page_folio(page);
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add(folio, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the folio; but it
	 * could be that between calling page_folio() and the refcount
	 * increment, the folio was split, in which case we'd end up
	 * holding a reference on a folio that has nothing to do with the page
	 * we were given anymore.
	 * So now that the folio is stable, recheck that the page still
	 * belongs to this folio.
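	 * (For example, a large folio may have been split concurrently, so
	 * that @page now belongs to a different, possibly order-0, folio;
	 * in that case the stale reference is dropped below and the lookup
	 * is retried.)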
	 */
	if (unlikely(page_folio(page) != folio)) {
		if (!put_devmap_managed_folio_refs(folio, refs))
			folio_put_refs(folio, refs);
		goto retry;
	}

	return folio;
}

static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		if (is_zero_folio(folio))
			return;
		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
		if (folio_test_large(folio))
			atomic_sub(refs, &folio->_pincount);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	if (!put_devmap_managed_folio_refs(folio, refs))
		folio_put_refs(folio, refs);
}

/**
 * try_grab_folio() - add a folio's refcount by a flag-dependent amount
 * @folio: pointer to folio to be grabbed
 * @refs:  the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time.
 *
 * Return: 0 for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). A negative error code for failure:
 *
 *   -ENOMEM	FOLL_GET or FOLL_PIN was set, but the folio could not
 *		be grabbed.
 *
 * It is called when we have a stable reference for the folio, typically in
 * GUP slow path.
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags)
{
	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
		return -ENOMEM;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
		return -EREMOTEIO;

	if (flags & FOLL_GET)
		folio_ref_add(folio, refs);
	else if (flags & FOLL_PIN) {
		/*
		 * Don't take a pin on the zero page - it's not going anywhere
		 * and it is used in a *lot* of places.
		 */
		if (is_zero_folio(folio))
			return 0;

		/*
		 * Increment the normal page refcount field at least once,
		 * so that the page really is pinned.
		 */
		if (folio_test_large(folio)) {
			folio_ref_add(folio, refs);
			atomic_add(refs, &folio->_pincount);
		} else {
			folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
		}

		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
	}

	return 0;
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	sanity_check_pinned_pages(&page, 1);
	gup_put_folio(page_folio(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);

/**
 * unpin_folio() - release a dma-pinned folio
 * @folio: pointer to folio to be released
 *
 * Folios that were pinned via memfd_pin_folios() or other similar routines
 * must be released either using unpin_folio() or unpin_folios().
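 *
 * A typical usage pattern might look roughly like this (illustrative sketch
 * only; see memfd_pin_folios() for the exact calling convention):
 *
 *	nr = memfd_pin_folios(memfd, start, end, folios, max_folios, &offset);
 *	if (nr > 0) {
 *		... use the pinned folios ...
 *		unpin_folios(folios, nr);
 *	}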
 */
void unpin_folio(struct folio *folio)
{
	gup_put_folio(folio, 1, FOLL_PIN);
}
EXPORT_SYMBOL_GPL(unpin_folio);

/**
 * folio_add_pin - Try to get an additional pin on a pinned folio
 * @folio: The folio to be pinned
 *
 * Get an additional pin on a folio we already have a pin on. Makes no change
 * if the folio is a zero_page.
 */
void folio_add_pin(struct folio *folio)
{
	if (is_zero_folio(folio))
		return;

	/*
	 * Similar to try_grab_folio(): be sure to *also* increment the normal
	 * page refcount field at least once, so that the page really is
	 * pinned.
	 */
	if (folio_test_large(folio)) {
		WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
		folio_ref_inc(folio);
		atomic_inc(&folio->_pincount);
	} else {
		WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
		folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
	}
}

static inline struct folio *gup_folio_range_next(struct page *start,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct page *next = nth_page(start, i);
	struct folio *folio = page_folio(next);
	unsigned int nr = 1;

	if (folio_test_large(folio))
		nr = min_t(unsigned int, npages - i,
			   folio_nr_pages(folio) - folio_page_idx(folio, next));

	*ntails = nr;
	return folio;
}

static inline struct folio *gup_folio_next(struct page **list,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct folio *folio = page_folio(list[i]);
	unsigned int nr;

	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio(list[nr]) != folio)
			break;
	}

	*ntails = nr - i;
	return folio;
}

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty().
		 * That could happen because clear_page_dirty_for_io() called
		 * folio_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_range_next(page, npages, i, &nr);
		if (make_dirty && !folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * Don't perform any sanity checks because we might have raced with
	 * fork() and some anonymous pages might now actually be shared --
	 * which is why we're unpinning after all.
	 */
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
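	 *
	 * For example (illustrative sketch only), a caller that does:
	 *
	 *	nr = pin_user_pages(start, want, gup_flags, pages);
	 *	...
	 *	unpin_user_pages(pages, nr);
	 *
	 * without first checking for nr < 0 would pass a negative error
	 * value as @npages and trigger this warning.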
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		if (!pages[i]) {
			nr = 1;
			continue;
		}
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages);

/**
 * unpin_user_folio() - release pages of a folio
 * @folio:  pointer to folio to be released
 * @npages: number of pages of same folio
 *
 * Release npages of the folio
 */
void unpin_user_folio(struct folio *folio, unsigned long npages)
{
	gup_put_folio(folio, npages, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_folio);

/**
 * unpin_folios() - release an array of gup-pinned folios.
 * @folios:  array of folios to be marked dirty and released.
 * @nfolios: number of folios in the @folios array.
 *
 * For each folio in the @folios array, release the folio using gup_put_folio.
 *
 * Please see the unpin_folio() documentation for details.
 */
void unpin_folios(struct folio **folios, unsigned long nfolios)
{
	unsigned long i = 0, j;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking folios
	 * (by leaving them pinned), but probably not. More likely, gup/pup
	 * returned a hard -ERRNO error to the caller, who erroneously passed
	 * it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(nfolios)))
		return;

	while (i < nfolios) {
		for (j = i + 1; j < nfolios; j++)
			if (folios[i] != folios[j])
				break;

		if (folios[i])
			gup_put_folio(folios[i], j - i, FOLL_PIN);
		i = j;
	}
}
EXPORT_SYMBOL_GPL(unpin_folios);

/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
	if (!test_bit(MMF_HAS_PINNED, mm_flags))
		set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU

#ifdef CONFIG_HAVE_GUP_FAST
static int record_subpages(struct page *page, unsigned long sz,
			   unsigned long addr, unsigned long end,
			   struct page **pages)
{
	struct page *start_page;
	int nr;

	start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
		pages[nr] = nth_page(start_page, nr);

	return nr;
}

/**
 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: folio's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on large folios: folio's refcount will be incremented by
 *    @refs, and its pincount will be incremented by @refs.
 *
 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
 *    @refs * GUP_PIN_COUNTING_BIAS.
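 *
 *    (Worked example, assuming the usual GUP_PIN_COUNTING_BIAS of 1024:
 *    pinning one page of an order-0 folio with @refs == 1 raises its
 *    refcount by 1024, while pinning one page of a large folio raises its
 *    refcount by 1 and its pincount by 1.)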
 *
 * Return: The folio containing @page (with refcount appropriately
 * incremented) for success, or NULL upon failure. If neither FOLL_GET
 * nor FOLL_PIN was set, that's considered failure, and furthermore,
 * a likely bug in the caller, so a warning is also emitted.
 *
 * It uses add ref unless zero to elevate the folio refcount and must be called
 * in fast path only.
 */
static struct folio *try_grab_folio_fast(struct page *page, int refs,
					 unsigned int flags)
{
	struct folio *folio;

	/* Raise warn if it is not called in fast GUP */
	VM_WARN_ON_ONCE(!irqs_disabled());

	if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
		return NULL;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
		return NULL;

	if (flags & FOLL_GET)
		return try_get_folio(page, refs);

	/* FOLL_PIN is set */

	/*
	 * Don't take a pin on the zero page - it's not going anywhere
	 * and it is used in a *lot* of places.
	 */
	if (is_zero_page(page))
		return page_folio(page);

	folio = try_get_folio(page, refs);
	if (!folio)
		return NULL;

	/*
	 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
	 * right zone, so fail and let the caller fall back to the slow
	 * path.
	 */
	if (unlikely((flags & FOLL_LONGTERM) &&
		     !folio_is_longterm_pinnable(folio))) {
		if (!put_devmap_managed_folio_refs(folio, refs))
			folio_put_refs(folio, refs);
		return NULL;
	}

	/*
	 * When pinning a large folio, use an exact count to track it.
	 *
	 * However, be sure to *also* increment the normal folio
	 * refcount field at least once, so that the folio really
	 * is pinned. That's why the refcount from the earlier
	 * try_get_folio() is left intact.
	 */
	if (folio_test_large(folio))
		atomic_add(refs, &folio->_pincount);
	else
		folio_ref_add(folio,
			      refs * (GUP_PIN_COUNTING_BIAS - 1));
	/*
	 * Adjust the pincount before re-checking the PTE for changes.
	 * This is essentially a smp_mb() and is paired with a memory
	 * barrier in folio_try_share_anon_rmap_*().
	 */
	smp_mb__after_atomic();

	node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);

	return folio;
}
#endif /* CONFIG_HAVE_GUP_FAST */

/* Common code for can_follow_write_* */
static inline bool can_follow_write_common(struct page *page,
		struct vm_area_struct *vma, unsigned int flags)
{
	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	return page && PageAnon(page) && PageAnonExclusive(page);
}

static struct page *no_page_table(struct vm_area_struct *vma,
				  unsigned int flags, unsigned long address)
{
	if (!(flags & FOLL_DUMP))
		return NULL;

	/*
	 * When core dumping, we don't want to allocate unnecessary pages or
	 * page tables.
	 * Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h = hstate_vma(vma);

		if (!hugetlbfs_pagecache_present(h, vma, address))
			return ERR_PTR(-EFAULT);
	} else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) {
		return ERR_PTR(-EFAULT);
	}

	return NULL;
}

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
/* FOLL_FORCE can write to even unwritable PUDs in COW mappings. */
static inline bool can_follow_write_pud(pud_t pud, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pud is writable, we can write to the page. */
	if (pud_write(pud))
		return true;

	return can_follow_write_common(page, vma, flags);
}

static struct page *follow_huge_pud(struct vm_area_struct *vma,
				    unsigned long addr, pud_t *pudp,
				    int flags, struct follow_page_context *ctx)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pud_t pud = *pudp;
	unsigned long pfn = pud_pfn(pud);
	int ret;

	assert_spin_locked(pud_lockptr(mm, pudp));

	if (!pud_present(pud))
		return NULL;

	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pud(pud, pfn_to_page(pfn), vma, flags))
		return NULL;

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    pud_devmap(pud)) {
		/*
		 * device mapped pages can only be returned if the caller
		 * will manage the page reference count.
		 *
		 * At least one of FOLL_GET | FOLL_PIN must be set, so
		 * assert that here:
		 */
		if (!(flags & (FOLL_GET | FOLL_PIN)))
			return ERR_PTR(-EEXIST);

		if (flags & FOLL_TOUCH)
			touch_pud(vma, addr, pudp, flags & FOLL_WRITE);

		ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
		if (!ctx->pgmap)
			return ERR_PTR(-EFAULT);
	}

	page = pfn_to_page(pfn);

	if (!pud_devmap(pud) && !pud_write(pud) &&
	    gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		page = ERR_PTR(ret);
	else
		ctx->page_mask = HPAGE_PUD_NR - 1;

	return page;
}

/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	if (!can_follow_write_common(page, vma, flags))
		return false;

	/* ... and a write-fault isn't required for other reasons.
	 */
	if (pmd_needs_soft_dirty_wp(vma, pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}

static struct page *follow_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmd,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmdval = *pmd;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	page = pmd_page(pmdval);
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pmd(pmdval, page, vma, flags))
		return NULL;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
		return ERR_PTR(-EFAULT);

	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
		return NULL;

	if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
		       !PageAnonExclusive(page), page);

	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		return ERR_PTR(ret);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	ctx->page_mask = HPAGE_PMD_NR - 1;

	return page;
}

#else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
static struct page *follow_huge_pud(struct vm_area_struct *vma,
				    unsigned long addr, pud_t *pudp,
				    int flags, struct follow_page_context *ctx)
{
	return NULL;
}

static struct page *follow_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmd,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	return NULL;
}
#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	if (flags & FOLL_TOUCH) {
		pte_t orig_entry = ptep_get(pte);
		pte_t entry = orig_entry;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(orig_entry, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pte is writable, we can write to the page. */
	if (pte_write(pte))
		return true;

	if (!can_follow_write_common(page, vma, flags))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (pte_needs_soft_dirty_wp(vma, pte))
		return false;
	return !userfaultfd_pte_wp(vma, pte);
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct folio *folio;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return no_page_table(vma, flags, address);
	pte = ptep_get(ptep);
	if (!pte_present(pte))
		goto no_page;
	if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
		goto no_page;

	page = vm_normal_page(vma, address, pte);

	/*
	 * We only care about anon pages in can_follow_write_pte() and don't
	 * have to worry about pte_devmap() because they are never anon.
	 */
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pte(pte, page, vma, flags)) {
		page = NULL;
		goto out;
	}

	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	folio = page_folio(page);

	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
		page = ERR_PTR(-EMLINK);
		goto out;
	}

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
		       !PageAnonExclusive(page), page);

	/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
	ret = try_grab_folio(folio, 1, flags);
	if (unlikely(ret)) {
		page = ERR_PTR(ret);
		goto out;
	}

	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case). Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_folio_accessible(folio);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !folio_test_dirty(folio))
			folio_mark_dirty(folio);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * folio_mark_accessed().
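		 * (A non-atomic read-modify-write of the PTE here could race
		 * with the hardware or another CPU setting the dirty bit and
		 * silently drop it; folio_mark_accessed() avoids touching the
		 * PTE altogether.)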
		 */
		folio_mark_accessed(folio);
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags, address);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	pmdval = pmdp_get_lockless(pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags, address);
	if (!pmd_present(pmdval))
		return no_page_table(vma, flags, address);
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
		return no_page_table(vma, flags, address);
	}
	if (likely(!pmd_leaf(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
		return no_page_table(vma, flags, address);

	ptl = pmd_lock(mm, pmd);
	pmdval = *pmd;
	if (unlikely(!pmd_present(pmdval))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags, address);
	}
	if (unlikely(!pmd_leaf(pmdval))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, address);
		/* If pmd was left empty, stuff a page table in there quickly */
		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
	spin_unlock(ptl);
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pudp, pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pudp = pud_offset(p4dp, address);
	pud = READ_ONCE(*pudp);
	if (!pud_present(pud))
		return no_page_table(vma, flags, address);
	if (pud_leaf(pud)) {
		ptl = pud_lock(mm, pudp);
		page = follow_huge_pud(vma, address, pudp, flags, ctx);
		spin_unlock(ptl);
		if (page)
			return page;
		return no_page_table(vma, flags, address);
	}
	if (unlikely(pud_bad(pud)))
		return no_page_table(vma, flags, address);

	return follow_pmd_mask(vma, address, pudp, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4dp, p4d;

	p4dp = p4d_offset(pgdp, address);
	p4d = READ_ONCE(*p4dp);
	BUILD_BUG_ON(p4d_leaf(p4d));

	if (!p4d_present(p4d) || p4d_bad(p4d))
		return no_page_table(vma, flags, address);

	return follow_pud_mask(vma, address, p4dp, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma:	vm_area_struct mapping @address
 * @address:	virtual address to look up
 * @flags:	flags modifying lookup behaviour
 * @ctx:	contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *		pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * When getting an anonymous page and the caller has to trigger unsharing
 * of a shared anonymous page first, -EMLINK is returned. The caller should
 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
 * relevant with FOLL_PIN and !FOLL_WRITE.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	vma_pgtable_walk_begin(vma);

	ctx->page_mask = 0;
	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		page = no_page_table(vma, flags, address);
	else
		page = follow_p4d_mask(vma, address, pgd, flags, ctx);

	vma_pgtable_walk_end(vma);

	return page;
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	pte = pte_offset_map(pmd, address);
	if (!pte)
		return -EFAULT;
	entry = ptep_get(pte);
	if (pte_none(entry))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, entry);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
			goto unmap;
		*page = pte_page(entry);
	}
	ret = try_grab_folio(page_folio(*page), 1, gup_flags);
	if (unlikely(ret))
		goto unmap;
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not
 * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
 * to 0 and -EBUSY returned.
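 *
 * (Callers such as __get_user_pages_locked() react to that by re-taking the
 * mmap_lock and retrying the faulting address, so -EBUSY is not propagated
 * to the final GUP caller.)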
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags, bool unshare,
		int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	if (flags & FOLL_NOFAULT)
		return -EFAULT;
	if (flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (flags & FOLL_UNLOCKABLE) {
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
		/*
		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
		 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
		 * That's because some callers may not be prepared to
		 * handle early exits caused by non-fatal signals.
		 */
		if (flags & FOLL_INTERRUPTIBLE)
			fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
	}
	if (flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}
	if (unshare) {
		fault_flags |= FAULT_FLAG_UNSHARE;
		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
		VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
		 * mmap lock in the page fault handler. Sanity check this.
		 */
		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
		*locked = 0;

		/*
		 * We should do the same as VM_FAULT_RETRY, but let's not
		 * return -EBUSY since that's not reflecting the reality of
		 * what has happened - we've just fully completed a page
		 * fault, with the mmap lock released. Use -EAGAIN to show
		 * that we want to take the mmap lock _again_.
		 */
		return -EAGAIN;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, flags);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	return 0;
}

/*
 * Writing to file-backed mappings which require folio dirty tracking using GUP
 * is a fundamentally broken operation, as kernel write access to GUP mappings
 * do not adhere to the semantics expected by a file system.
 *
 * Consider the following scenario:-
 *
 * 1. A folio is written to via GUP which write-faults the memory, notifying
 *    the file system and dirtying the folio.
 * 2. Later, writeback is triggered, resulting in the folio being cleaned and
 *    the PTE being marked read-only.
 * 3. The GUP caller writes to the folio, as it is mapped read/write via the
 *    direct mapping.
 * 4. The GUP caller, now done with the page, unpins it and sets it dirty
 *    (though it does not have to).
 *
 * This results in both data being written to a folio without writenotify, and
 * the folio being dirtied unexpectedly (if the caller decides to do so).
 */
static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
					  unsigned long gup_flags)
{
	/*
	 * If we aren't pinning then no problematic write can occur. A long term
	 * pin is the most egregious case so this is the case we disallow.
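	 * (In other words, only the FOLL_PIN | FOLL_LONGTERM combination is
	 * rejected below; plain FOLL_GET or short-term FOLL_PIN writes are
	 * still allowed.)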
	 */
	if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
	    (FOLL_PIN | FOLL_LONGTERM))
		return true;

	/*
	 * If the VMA does not require dirty tracking then no problematic write
	 * can occur either.
	 */
	return !vma_needs_dirty_tracking(vma);
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);
	bool vma_anon = vma_is_anonymous(vma);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if ((gup_flags & FOLL_ANON) && !vma_anon)
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if (vma_is_secretmem(vma))
		return -EFAULT;

	if (write) {
		if (!vma_anon &&
		    !writable_file_mapping_allowed(vma, gup_flags))
			return -EFAULT;

		if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/*
 * This is "vma_lookup()", but with a warning if we would have
 * historically expanded the stack in the GUP code.
 */
static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
	 unsigned long addr)
{
#ifdef CONFIG_STACK_GROWSUP
	return vma_lookup(mm, addr);
#else
	static volatile unsigned long next_warn;
	struct vm_area_struct *vma;
	unsigned long now, next;

	vma = find_vma(mm, addr);
	if (!vma || (addr >= vma->vm_start))
		return vma;

	/* Only warn for half-way relevant accesses */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	if (vma->vm_start - addr > 65536)
		return NULL;

	/* Let's not warn more than once an hour.. */
	now = jiffies; next = next_warn;
	if (next && time_before(now, next))
		return NULL;
	next_warn = now + 60*60*HZ;

	/* Let people know things may have changed. */
	pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
		current->comm, task_pid_nr(current),
		vma->vm_start, vma->vm_end, addr);
	dump_stack();
	return NULL;
#endif
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *		Or NULL, if caller only intends to ensure the pages are
 *		faulted in.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * Must be called with mmap_lock held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
 * be released. If this happens *@locked will be set to 0 on return.
 *
 * A caller using such a combination of @gup_flags must therefore hold the
 * mmap_lock for reading only, and recognize when it's been released. Otherwise,
 * it must be held for either reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr_remote(mm, start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	do {
		struct page *page;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			/*
			 * MADV_POPULATE_(READ|WRITE) wants to handle VMA
			 * lookups+error reporting differently.
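			 * (That path deliberately uses a plain vma_lookup()
			 * -- no stack expansion, no warning -- and reports
			 * -ENOMEM/-EINVAL as expected by those madvise()
			 * calls; see faultin_page_range().)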
			 */
			if (gup_flags & FOLL_MADV_POPULATE) {
				vma = vma_lookup(mm, start);
				if (!vma) {
					ret = -ENOMEM;
					goto out;
				}
				if (check_vma_flags(vma, gup_flags)) {
					ret = -EINVAL;
					goto out;
				}
				goto retry;
			}
			vma = gup_vma_lookup(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &page : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, gup_flags, &ctx);
		if (!page || PTR_ERR(page) == -EMLINK) {
			ret = faultin_page(vma, start, gup_flags,
					   PTR_ERR(page) == -EMLINK, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
			case -EAGAIN:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page. If the caller expects **pages to be
			 * filled in, bail out now, because that can't be done
			 * for this page.
			 */
			if (pages) {
				ret = PTR_ERR(page);
				goto out;
			}
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
next_page:
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;

		if (pages) {
			struct page *subpage;
			unsigned int j;

			/*
			 * This must be a large folio (and doesn't need to
			 * be the whole folio; it can be part of it), do
			 * the refcount work for all the subpages too.
			 *
			 * NOTE: here the page may not be the head page
			 * e.g. when start addr is not thp-size aligned.
			 * try_grab_folio() should have taken care of tail
			 * pages.
			 */
			if (page_increm > 1) {
				struct folio *folio = page_folio(page);

				/*
				 * Since we already hold refcount on the
				 * large folio, this should never fail.
				 */
				if (try_grab_folio(folio, page_increm - 1,
						   gup_flags)) {
					/*
					 * Release the 1st page ref if the
					 * folio is problematic, fail hard.
					 */
					gup_put_folio(folio, 1, gup_flags);
					ret = -EFAULT;
					goto out;
				}
			}

			for (j = 0; j < page_increm; j++) {
				subpage = nth_page(page, j);
				pages[i + j] = subpage;
				flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
				flush_dcache_page(subpage);
			}
		}

		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ?
			VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
 *		does not allow retry. If NULL, the caller must guarantee
 *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it has not the
 * same semantics wrt the @mm->mmap_lock as does filemap_fault().
 */
int fixup_user_fault(struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	address = untagged_addr_remote(mm, address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = gup_vma_lookup(mm, address);
	if (!vma)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * NOTE: it's a pity that we need to retake the lock here
		 * to pair with the unlock() in the callers. Ideally we
		 * could tell the callers so they do not need to unlock.
		 */
		mmap_read_lock(mm);
		*unlocked = true;
		return 0;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		mmap_read_lock(mm);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

/*
 * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is
 * specified, it'll also respond to generic signals. The caller of GUP
 * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
 */
static bool gup_signal_pending(unsigned int flags)
{
	if (fatal_signal_pending(current))
		return true;

	if (!(flags & FOLL_INTERRUPTIBLE))
		return false;

	return signal_pending(current);
}

/*
 * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
 * the caller. This function may drop the mmap_lock. If it does so, then it will
 * set (*locked = 0).
 *
 * (*locked == 0) means that the caller expects this function to acquire and
 * drop the mmap_lock. Therefore, the value of *locked will still be zero when
 * the function returns, even though it may have changed temporarily during
 * function execution.
 *
 * Please note that this function, unlike __get_user_pages(), will not return 0
 * for nr_pages > 0, unless FOLL_NOWAIT is used.
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool must_unlock = false;

	if (!nr_pages)
		return 0;

	/*
	 * The internal caller expects GUP to manage the lock internally and the
	 * lock must be released when this returns.
	 */
	if (!*locked) {
		if (mmap_read_lock_killable(mm))
			return -EAGAIN;
		must_unlock = true;
		*locked = 1;
	}
	else
		mmap_assert_locked(mm);

	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       locked);
		if (!(flags & FOLL_UNLOCKABLE)) {
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			pages_done = ret;
			break;
		}

		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;

		/* The lock was temporarily dropped, so we must unlock later */
		must_unlock = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
		 * by fatal signals of even common signals, depending on
		 * the caller's request. So we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */
		if (gup_signal_pending(flags)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (must_unlock && *locked) {
		/*
		 * We either temporarily dropped the lock, or the caller
		 * requested that we both acquire and drop the lock. Either way,
		 * we must now unlock, and notify the caller of that state.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}

	/*
	 * Failing to pin anything implies something has gone wrong (except when
	 * FOLL_NOWAIT is specified).
	 */
	if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
		return -EFAULT;

	return pages_done;
}

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must held for read only and may be
 * released. If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int local_locked = 1;
	int gup_flags;
	long ret;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
	 * faultin_page() to break COW, so it has no work to do here.
	 */
	if (vma->vm_flags & VM_LOCKONFAULT)
		return nr_pages;

	/* ... similarly, we've never faulted in PROT_NONE pages */
	if (!vma_is_accessible(vma))
		return -EFAULT;

	gup_flags = FOLL_TOUCH;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 *
	 * Otherwise, do a read fault, and use FOLL_FORCE in case it's not
	 * readable (ie write-only or executable).
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;
	else
		gup_flags |= FOLL_FORCE;

	if (locked)
		gup_flags |= FOLL_UNLOCKABLE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
1928 */ 1929 ret = __get_user_pages(mm, start, nr_pages, gup_flags, 1930 NULL, locked ? locked : &local_locked); 1931 lru_add_drain(); 1932 return ret; 1933 } 1934 1935 /* 1936 * faultin_page_range() - populate (prefault) page tables inside the 1937 * given range readable/writable 1938 * 1939 * This takes care of mlocking the pages, too, if VM_LOCKED is set. 1940 * 1941 * @mm: the mm to populate page tables in 1942 * @start: start address 1943 * @end: end address 1944 * @write: whether to prefault readable or writable 1945 * @locked: whether the mmap_lock is still held 1946 * 1947 * Returns either number of processed pages in the MM, or a negative error 1948 * code on error (see __get_user_pages()). Note that this function reports 1949 * errors related to VMAs, such as incompatible mappings, as expected by 1950 * MADV_POPULATE_(READ|WRITE). 1951 * 1952 * The range must be page-aligned. 1953 * 1954 * mm->mmap_lock must be held. If it's released, *@locked will be set to 0. 1955 */ 1956 long faultin_page_range(struct mm_struct *mm, unsigned long start, 1957 unsigned long end, bool write, int *locked) 1958 { 1959 unsigned long nr_pages = (end - start) / PAGE_SIZE; 1960 int gup_flags; 1961 long ret; 1962 1963 VM_BUG_ON(!PAGE_ALIGNED(start)); 1964 VM_BUG_ON(!PAGE_ALIGNED(end)); 1965 mmap_assert_locked(mm); 1966 1967 /* 1968 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark 1969 * the page dirty with FOLL_WRITE -- which doesn't make a 1970 * difference with !FOLL_FORCE, because the page is writable 1971 * in the page table. 1972 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit 1973 * a poisoned page. 1974 * !FOLL_FORCE: Require proper access permissions. 1975 */ 1976 gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE | 1977 FOLL_MADV_POPULATE; 1978 if (write) 1979 gup_flags |= FOLL_WRITE; 1980 1981 ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked, 1982 gup_flags); 1983 lru_add_drain(); 1984 return ret; 1985 } 1986 1987 /* 1988 * __mm_populate - populate and/or mlock pages within a range of address space. 1989 * 1990 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap 1991 * flags. VMAs must be already marked with the desired vm_flags, and 1992 * mmap_lock must not be held. 1993 */ 1994 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) 1995 { 1996 struct mm_struct *mm = current->mm; 1997 unsigned long end, nstart, nend; 1998 struct vm_area_struct *vma = NULL; 1999 int locked = 0; 2000 long ret = 0; 2001 2002 end = start + len; 2003 2004 for (nstart = start; nstart < end; nstart = nend) { 2005 /* 2006 * We want to fault in pages for [nstart; end) address range. 2007 * Find first corresponding VMA. 2008 */ 2009 if (!locked) { 2010 locked = 1; 2011 mmap_read_lock(mm); 2012 vma = find_vma_intersection(mm, nstart, end); 2013 } else if (nstart >= vma->vm_end) 2014 vma = find_vma_intersection(mm, vma->vm_end, end); 2015 2016 if (!vma) 2017 break; 2018 /* 2019 * Set [nstart; nend) to intersection of desired address 2020 * range with the first VMA. Also, skip undesirable VMA types. 2021 */ 2022 nend = min(end, vma->vm_end); 2023 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 2024 continue; 2025 if (nstart < vma->vm_start) 2026 nstart = vma->vm_start; 2027 /* 2028 * Now fault in a range of pages. populate_vma_page_range() 2029 * double checks the vma flags, so that it won't mlock pages 2030 * if the vma was already munlocked. 
2031 */ 2032 ret = populate_vma_page_range(vma, nstart, nend, &locked); 2033 if (ret < 0) { 2034 if (ignore_errors) { 2035 ret = 0; 2036 continue; /* continue at next VMA */ 2037 } 2038 break; 2039 } 2040 nend = nstart + ret * PAGE_SIZE; 2041 ret = 0; 2042 } 2043 if (locked) 2044 mmap_read_unlock(mm); 2045 return ret; /* 0 or negative error code */ 2046 } 2047 #else /* CONFIG_MMU */ 2048 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, 2049 unsigned long nr_pages, struct page **pages, 2050 int *locked, unsigned int foll_flags) 2051 { 2052 struct vm_area_struct *vma; 2053 bool must_unlock = false; 2054 unsigned long vm_flags; 2055 long i; 2056 2057 if (!nr_pages) 2058 return 0; 2059 2060 /* 2061 * The internal caller expects GUP to manage the lock internally and the 2062 * lock must be released when this returns. 2063 */ 2064 if (!*locked) { 2065 if (mmap_read_lock_killable(mm)) 2066 return -EAGAIN; 2067 must_unlock = true; 2068 *locked = 1; 2069 } 2070 2071 /* calculate required read or write permissions. 2072 * If FOLL_FORCE is set, we only require the "MAY" flags. 2073 */ 2074 vm_flags = (foll_flags & FOLL_WRITE) ? 2075 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); 2076 vm_flags &= (foll_flags & FOLL_FORCE) ? 2077 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); 2078 2079 for (i = 0; i < nr_pages; i++) { 2080 vma = find_vma(mm, start); 2081 if (!vma) 2082 break; 2083 2084 /* protect what we can, including chardevs */ 2085 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || 2086 !(vm_flags & vma->vm_flags)) 2087 break; 2088 2089 if (pages) { 2090 pages[i] = virt_to_page((void *)start); 2091 if (pages[i]) 2092 get_page(pages[i]); 2093 } 2094 2095 start = (start + PAGE_SIZE) & PAGE_MASK; 2096 } 2097 2098 if (must_unlock && *locked) { 2099 mmap_read_unlock(mm); 2100 *locked = 0; 2101 } 2102 2103 return i ? : -EFAULT; 2104 } 2105 #endif /* !CONFIG_MMU */ 2106 2107 /** 2108 * fault_in_writeable - fault in userspace address range for writing 2109 * @uaddr: start of address range 2110 * @size: size of address range 2111 * 2112 * Returns the number of bytes not faulted in (like copy_to_user() and 2113 * copy_from_user()). 2114 */ 2115 size_t fault_in_writeable(char __user *uaddr, size_t size) 2116 { 2117 char __user *start = uaddr, *end; 2118 2119 if (unlikely(size == 0)) 2120 return 0; 2121 if (!user_write_access_begin(uaddr, size)) 2122 return size; 2123 if (!PAGE_ALIGNED(uaddr)) { 2124 unsafe_put_user(0, uaddr, out); 2125 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr); 2126 } 2127 end = (char __user *)PAGE_ALIGN((unsigned long)start + size); 2128 if (unlikely(end < start)) 2129 end = NULL; 2130 while (uaddr != end) { 2131 unsafe_put_user(0, uaddr, out); 2132 uaddr += PAGE_SIZE; 2133 } 2134 2135 out: 2136 user_write_access_end(); 2137 if (size > uaddr - start) 2138 return size - (uaddr - start); 2139 return 0; 2140 } 2141 EXPORT_SYMBOL(fault_in_writeable); 2142 2143 /** 2144 * fault_in_subpage_writeable - fault in an address range for writing 2145 * @uaddr: start of address range 2146 * @size: size of address range 2147 * 2148 * Fault in a user address range for writing while checking for permissions at 2149 * sub-page granularity (e.g. arm64 MTE). This function should be used when 2150 * the caller cannot guarantee forward progress of a copy_to_user() loop. 2151 * 2152 * Returns the number of bytes not faulted in (like copy_to_user() and 2153 * copy_from_user()). 
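 *
 * A rough caller sketch (illustrative only; "ubuf", "kbuf" and "len" are
 * hypothetical caller state, not taken from any real user):
 *
 *	while (copy_to_user(ubuf, kbuf, len)) {
 *		if (fault_in_subpage_writeable(ubuf, len) == len)
 *			return -EFAULT;
 *	}
 *
 * Here -EFAULT is returned only once not a single byte of the range could be
 * faulted in, i.e. when no forward progress is possible.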
2154 */ 2155 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) 2156 { 2157 size_t faulted_in; 2158 2159 /* 2160 * Attempt faulting in at page granularity first for page table 2161 * permission checking. The arch-specific probe_subpage_writeable() 2162 * functions may not check for this. 2163 */ 2164 faulted_in = size - fault_in_writeable(uaddr, size); 2165 if (faulted_in) 2166 faulted_in -= probe_subpage_writeable(uaddr, faulted_in); 2167 2168 return size - faulted_in; 2169 } 2170 EXPORT_SYMBOL(fault_in_subpage_writeable); 2171 2172 /* 2173 * fault_in_safe_writeable - fault in an address range for writing 2174 * @uaddr: start of address range 2175 * @size: length of address range 2176 * 2177 * Faults in an address range for writing. This is primarily useful when we 2178 * already know that some or all of the pages in the address range aren't in 2179 * memory. 2180 * 2181 * Unlike fault_in_writeable(), this function is non-destructive. 2182 * 2183 * Note that we don't pin or otherwise hold the pages referenced that we fault 2184 * in. There's no guarantee that they'll stay in memory for any duration of 2185 * time. 2186 * 2187 * Returns the number of bytes not faulted in, like copy_to_user() and 2188 * copy_from_user(). 2189 */ 2190 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) 2191 { 2192 unsigned long start = (unsigned long)uaddr, end; 2193 struct mm_struct *mm = current->mm; 2194 bool unlocked = false; 2195 2196 if (unlikely(size == 0)) 2197 return 0; 2198 end = PAGE_ALIGN(start + size); 2199 if (end < start) 2200 end = 0; 2201 2202 mmap_read_lock(mm); 2203 do { 2204 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked)) 2205 break; 2206 start = (start + PAGE_SIZE) & PAGE_MASK; 2207 } while (start != end); 2208 mmap_read_unlock(mm); 2209 2210 if (size > (unsigned long)uaddr - start) 2211 return size - ((unsigned long)uaddr - start); 2212 return 0; 2213 } 2214 EXPORT_SYMBOL(fault_in_safe_writeable); 2215 2216 /** 2217 * fault_in_readable - fault in userspace address range for reading 2218 * @uaddr: start of user address range 2219 * @size: size of user address range 2220 * 2221 * Returns the number of bytes not faulted in (like copy_to_user() and 2222 * copy_from_user()). 2223 */ 2224 size_t fault_in_readable(const char __user *uaddr, size_t size) 2225 { 2226 const char __user *start = uaddr, *end; 2227 volatile char c; 2228 2229 if (unlikely(size == 0)) 2230 return 0; 2231 if (!user_read_access_begin(uaddr, size)) 2232 return size; 2233 if (!PAGE_ALIGNED(uaddr)) { 2234 unsafe_get_user(c, uaddr, out); 2235 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr); 2236 } 2237 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size); 2238 if (unlikely(end < start)) 2239 end = NULL; 2240 while (uaddr != end) { 2241 unsafe_get_user(c, uaddr, out); 2242 uaddr += PAGE_SIZE; 2243 } 2244 2245 out: 2246 user_read_access_end(); 2247 (void)c; 2248 if (size > uaddr - start) 2249 return size - (uaddr - start); 2250 return 0; 2251 } 2252 EXPORT_SYMBOL(fault_in_readable); 2253 2254 /** 2255 * get_dump_page() - pin user page in memory while writing it to core dump 2256 * @addr: user address 2257 * 2258 * Returns struct page pointer of user page pinned for dump, 2259 * to be freed afterwards by put_page(). 
2260 * 2261 * Returns NULL on any kind of failure - a hole must then be inserted into 2262 * the corefile, to preserve alignment with its headers; and also returns 2263 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - 2264 * allowing a hole to be left in the corefile to save disk space. 2265 * 2266 * Called without mmap_lock (takes and releases the mmap_lock by itself). 2267 */ 2268 #ifdef CONFIG_ELF_CORE 2269 struct page *get_dump_page(unsigned long addr) 2270 { 2271 struct page *page; 2272 int locked = 0; 2273 int ret; 2274 2275 ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, 2276 FOLL_FORCE | FOLL_DUMP | FOLL_GET); 2277 return (ret == 1) ? page : NULL; 2278 } 2279 #endif /* CONFIG_ELF_CORE */ 2280 2281 #ifdef CONFIG_MIGRATION 2282 2283 /* 2284 * An array of either pages or folios ("pofs"). Although it may seem tempting to 2285 * avoid this complication, by simply interpreting a list of folios as a list of 2286 * pages, that approach won't work in the longer term, because eventually the 2287 * layouts of struct page and struct folio will become completely different. 2288 * Furthermore, this pof approach avoids excessive page_folio() calls. 2289 */ 2290 struct pages_or_folios { 2291 union { 2292 struct page **pages; 2293 struct folio **folios; 2294 void **entries; 2295 }; 2296 bool has_folios; 2297 long nr_entries; 2298 }; 2299 2300 static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i) 2301 { 2302 if (pofs->has_folios) 2303 return pofs->folios[i]; 2304 return page_folio(pofs->pages[i]); 2305 } 2306 2307 static void pofs_clear_entry(struct pages_or_folios *pofs, long i) 2308 { 2309 pofs->entries[i] = NULL; 2310 } 2311 2312 static void pofs_unpin(struct pages_or_folios *pofs) 2313 { 2314 if (pofs->has_folios) 2315 unpin_folios(pofs->folios, pofs->nr_entries); 2316 else 2317 unpin_user_pages(pofs->pages, pofs->nr_entries); 2318 } 2319 2320 /* 2321 * Returns the number of collected folios. Return value is always >= 0. 2322 */ 2323 static unsigned long collect_longterm_unpinnable_folios( 2324 struct list_head *movable_folio_list, 2325 struct pages_or_folios *pofs) 2326 { 2327 unsigned long i, collected = 0; 2328 struct folio *prev_folio = NULL; 2329 bool drain_allow = true; 2330 2331 for (i = 0; i < pofs->nr_entries; i++) { 2332 struct folio *folio = pofs_get_folio(pofs, i); 2333 2334 if (folio == prev_folio) 2335 continue; 2336 prev_folio = folio; 2337 2338 if (folio_is_longterm_pinnable(folio)) 2339 continue; 2340 2341 collected++; 2342 2343 if (folio_is_device_coherent(folio)) 2344 continue; 2345 2346 if (folio_test_hugetlb(folio)) { 2347 folio_isolate_hugetlb(folio, movable_folio_list); 2348 continue; 2349 } 2350 2351 if (!folio_test_lru(folio) && drain_allow) { 2352 lru_add_drain_all(); 2353 drain_allow = false; 2354 } 2355 2356 if (!folio_isolate_lru(folio)) 2357 continue; 2358 2359 list_add_tail(&folio->lru, movable_folio_list); 2360 node_stat_mod_folio(folio, 2361 NR_ISOLATED_ANON + folio_is_file_lru(folio), 2362 folio_nr_pages(folio)); 2363 } 2364 2365 return collected; 2366 } 2367 2368 /* 2369 * Unpins all folios and migrates device coherent folios and movable_folio_list. 2370 * Returns -EAGAIN if all folios were successfully migrated or -errno for 2371 * failure (or partial success). 
2372 */ 2373 static int 2374 migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list, 2375 struct pages_or_folios *pofs) 2376 { 2377 int ret; 2378 unsigned long i; 2379 2380 for (i = 0; i < pofs->nr_entries; i++) { 2381 struct folio *folio = pofs_get_folio(pofs, i); 2382 2383 if (folio_is_device_coherent(folio)) { 2384 /* 2385 * Migration will fail if the folio is pinned, so 2386 * convert the pin on the source folio to a normal 2387 * reference. 2388 */ 2389 pofs_clear_entry(pofs, i); 2390 folio_get(folio); 2391 gup_put_folio(folio, 1, FOLL_PIN); 2392 2393 if (migrate_device_coherent_folio(folio)) { 2394 ret = -EBUSY; 2395 goto err; 2396 } 2397 2398 continue; 2399 } 2400 2401 /* 2402 * We can't migrate folios with unexpected references, so drop 2403 * the reference obtained by __get_user_pages_locked(). 2404 * Migrating folios have been added to movable_folio_list after 2405 * calling folio_isolate_lru() which takes a reference so the 2406 * folio won't be freed if it's migrating. 2407 */ 2408 unpin_folio(folio); 2409 pofs_clear_entry(pofs, i); 2410 } 2411 2412 if (!list_empty(movable_folio_list)) { 2413 struct migration_target_control mtc = { 2414 .nid = NUMA_NO_NODE, 2415 .gfp_mask = GFP_USER | __GFP_NOWARN, 2416 .reason = MR_LONGTERM_PIN, 2417 }; 2418 2419 if (migrate_pages(movable_folio_list, alloc_migration_target, 2420 NULL, (unsigned long)&mtc, MIGRATE_SYNC, 2421 MR_LONGTERM_PIN, NULL)) { 2422 ret = -ENOMEM; 2423 goto err; 2424 } 2425 } 2426 2427 putback_movable_pages(movable_folio_list); 2428 2429 return -EAGAIN; 2430 2431 err: 2432 pofs_unpin(pofs); 2433 putback_movable_pages(movable_folio_list); 2434 2435 return ret; 2436 } 2437 2438 static long 2439 check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs) 2440 { 2441 LIST_HEAD(movable_folio_list); 2442 unsigned long collected; 2443 2444 collected = collect_longterm_unpinnable_folios(&movable_folio_list, 2445 pofs); 2446 if (!collected) 2447 return 0; 2448 2449 return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs); 2450 } 2451 2452 /* 2453 * Check whether all folios are *allowed* to be pinned indefinitely (long term). 2454 * Rather confusingly, all folios in the range are required to be pinned via 2455 * FOLL_PIN, before calling this routine. 2456 * 2457 * Return values: 2458 * 2459 * 0: if everything is OK and all folios in the range are allowed to be pinned, 2460 * then this routine leaves all folios pinned and returns zero for success. 2461 * 2462 * -EAGAIN: if any folios in the range are not allowed to be pinned, then this 2463 * routine will migrate those folios away, unpin all the folios in the range. If 2464 * migration of the entire set of folios succeeds, then -EAGAIN is returned. The 2465 * caller should re-pin the entire range with FOLL_PIN and then call this 2466 * routine again. 2467 * 2468 * -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this 2469 * indicates a migration failure. The caller should give up, and propagate the 2470 * error back up the call stack. The caller does not need to unpin any folios in 2471 * that case, because this routine will do the unpinning. 
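 *
 * The expected calling pattern, sketched loosely below (memfd_pin_folios() and
 * __gup_longterm_locked() below follow this shape via the folios or pages
 * variant; "nr_pinned", "rc" and "folios" are illustrative):
 *
 *	do {
 *		nr_pinned = <pin the range with FOLL_PIN>;
 *		if (nr_pinned <= 0) {
 *			rc = nr_pinned;
 *			break;
 *		}
 *		rc = check_and_migrate_movable_folios(nr_pinned, folios);
 *	} while (rc == -EAGAIN);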
2472 */ 2473 static long check_and_migrate_movable_folios(unsigned long nr_folios, 2474 struct folio **folios) 2475 { 2476 struct pages_or_folios pofs = { 2477 .folios = folios, 2478 .has_folios = true, 2479 .nr_entries = nr_folios, 2480 }; 2481 2482 return check_and_migrate_movable_pages_or_folios(&pofs); 2483 } 2484 2485 /* 2486 * Return values and behavior are the same as those for 2487 * check_and_migrate_movable_folios(). 2488 */ 2489 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2490 struct page **pages) 2491 { 2492 struct pages_or_folios pofs = { 2493 .pages = pages, 2494 .has_folios = false, 2495 .nr_entries = nr_pages, 2496 }; 2497 2498 return check_and_migrate_movable_pages_or_folios(&pofs); 2499 } 2500 #else 2501 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2502 struct page **pages) 2503 { 2504 return 0; 2505 } 2506 2507 static long check_and_migrate_movable_folios(unsigned long nr_folios, 2508 struct folio **folios) 2509 { 2510 return 0; 2511 } 2512 #endif /* CONFIG_MIGRATION */ 2513 2514 /* 2515 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which 2516 * allows us to process the FOLL_LONGTERM flag. 2517 */ 2518 static long __gup_longterm_locked(struct mm_struct *mm, 2519 unsigned long start, 2520 unsigned long nr_pages, 2521 struct page **pages, 2522 int *locked, 2523 unsigned int gup_flags) 2524 { 2525 unsigned int flags; 2526 long rc, nr_pinned_pages; 2527 2528 if (!(gup_flags & FOLL_LONGTERM)) 2529 return __get_user_pages_locked(mm, start, nr_pages, pages, 2530 locked, gup_flags); 2531 2532 flags = memalloc_pin_save(); 2533 do { 2534 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, 2535 pages, locked, 2536 gup_flags); 2537 if (nr_pinned_pages <= 0) { 2538 rc = nr_pinned_pages; 2539 break; 2540 } 2541 2542 /* FOLL_LONGTERM implies FOLL_PIN */ 2543 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); 2544 } while (rc == -EAGAIN); 2545 memalloc_pin_restore(flags); 2546 return rc ? rc : nr_pinned_pages; 2547 } 2548 2549 /* 2550 * Check that the given flags are valid for the exported gup/pup interface, and 2551 * update them with the required flags that the caller must have set. 2552 */ 2553 static bool is_valid_gup_args(struct page **pages, int *locked, 2554 unsigned int *gup_flags_p, unsigned int to_set) 2555 { 2556 unsigned int gup_flags = *gup_flags_p; 2557 2558 /* 2559 * These flags not allowed to be specified externally to the gup 2560 * interfaces: 2561 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only 2562 * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote() 2563 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL 2564 */ 2565 if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS)) 2566 return false; 2567 2568 gup_flags |= to_set; 2569 if (locked) { 2570 /* At the external interface locked must be set */ 2571 if (WARN_ON_ONCE(*locked != 1)) 2572 return false; 2573 2574 gup_flags |= FOLL_UNLOCKABLE; 2575 } 2576 2577 /* FOLL_GET and FOLL_PIN are mutually exclusive. 
*/ 2578 if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == 2579 (FOLL_PIN | FOLL_GET))) 2580 return false; 2581 2582 /* LONGTERM can only be specified when pinning */ 2583 if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM))) 2584 return false; 2585 2586 /* Pages input must be given if using GET/PIN */ 2587 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) 2588 return false; 2589 2590 /* We want to allow the pgmap to be hot-unplugged at all times */ 2591 if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) && 2592 (gup_flags & FOLL_PCI_P2PDMA))) 2593 return false; 2594 2595 *gup_flags_p = gup_flags; 2596 return true; 2597 } 2598 2599 #ifdef CONFIG_MMU 2600 /** 2601 * get_user_pages_remote() - pin user pages in memory 2602 * @mm: mm_struct of target mm 2603 * @start: starting user address 2604 * @nr_pages: number of pages from start to pin 2605 * @gup_flags: flags modifying lookup behaviour 2606 * @pages: array that receives pointers to the pages pinned. 2607 * Should be at least nr_pages long. Or NULL, if caller 2608 * only intends to ensure the pages are faulted in. 2609 * @locked: pointer to lock flag indicating whether lock is held and 2610 * subsequently whether VM_FAULT_RETRY functionality can be 2611 * utilised. Lock must initially be held. 2612 * 2613 * Returns either number of pages pinned (which may be less than the 2614 * number requested), or an error. Details about the return value: 2615 * 2616 * -- If nr_pages is 0, returns 0. 2617 * -- If nr_pages is >0, but no pages were pinned, returns -errno. 2618 * -- If nr_pages is >0, and some pages were pinned, returns the number of 2619 * pages pinned. Again, this may be less than nr_pages. 2620 * 2621 * The caller is responsible for releasing returned @pages, via put_page(). 2622 * 2623 * Must be called with mmap_lock held for read or write. 2624 * 2625 * get_user_pages_remote walks a process's page tables and takes a reference 2626 * to each struct page that each user address corresponds to at a given 2627 * instant. That is, it takes the page that would be accessed if a user 2628 * thread accesses the given user virtual address at that instant. 2629 * 2630 * This does not guarantee that the page exists in the user mappings when 2631 * get_user_pages_remote returns, and there may even be a completely different 2632 * page there in some cases (eg. if mmapped pagecache has been invalidated 2633 * and subsequently re-faulted). However it does guarantee that the page 2634 * won't be freed completely. And mostly callers simply care that the page 2635 * contains data that was valid *at some point in time*. Typically, an IO 2636 * or similar operation cannot guarantee anything stronger anyway because 2637 * locks can't be held over the syscall boundary. 2638 * 2639 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page 2640 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must 2641 * be called after the page is finished with, and before put_page is called. 2642 * 2643 * get_user_pages_remote is typically used for fewer-copy IO operations, 2644 * to get a handle on the memory by some means other than accesses 2645 * via the user virtual addresses. The pages may be submitted for 2646 * DMA to devices or accessed via their kernel linear mapping (via the 2647 * kmap APIs). Care should be taken to use the correct cache flushing APIs. 2648 * 2649 * See also get_user_pages_fast, for performance critical applications. 
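 *
 * A rough calling sketch of the @locked protocol (illustrative only; "mm",
 * "addr" and "page" are hypothetical caller state):
 *
 *	int locked = 1;
 *	struct page *page;
 *	long ret;
 *
 *	mmap_read_lock(mm);
 *	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *	if (ret == 1) {
 *		...use the page...
 *		put_page(page);
 *	}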
2650 * 2651 * get_user_pages_remote should be phased out in favor of 2652 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing 2653 * should use get_user_pages_remote because it cannot pass 2654 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. 2655 */ 2656 long get_user_pages_remote(struct mm_struct *mm, 2657 unsigned long start, unsigned long nr_pages, 2658 unsigned int gup_flags, struct page **pages, 2659 int *locked) 2660 { 2661 int local_locked = 1; 2662 2663 if (!is_valid_gup_args(pages, locked, &gup_flags, 2664 FOLL_TOUCH | FOLL_REMOTE)) 2665 return -EINVAL; 2666 2667 return __get_user_pages_locked(mm, start, nr_pages, pages, 2668 locked ? locked : &local_locked, 2669 gup_flags); 2670 } 2671 EXPORT_SYMBOL(get_user_pages_remote); 2672 2673 #else /* CONFIG_MMU */ 2674 long get_user_pages_remote(struct mm_struct *mm, 2675 unsigned long start, unsigned long nr_pages, 2676 unsigned int gup_flags, struct page **pages, 2677 int *locked) 2678 { 2679 return 0; 2680 } 2681 #endif /* !CONFIG_MMU */ 2682 2683 /** 2684 * get_user_pages() - pin user pages in memory 2685 * @start: starting user address 2686 * @nr_pages: number of pages from start to pin 2687 * @gup_flags: flags modifying lookup behaviour 2688 * @pages: array that receives pointers to the pages pinned. 2689 * Should be at least nr_pages long. Or NULL, if caller 2690 * only intends to ensure the pages are faulted in. 2691 * 2692 * This is the same as get_user_pages_remote(), just with a less-flexible 2693 * calling convention where we assume that the mm being operated on belongs to 2694 * the current task, and doesn't allow passing of a locked parameter. We also 2695 * obviously don't pass FOLL_REMOTE in here. 2696 */ 2697 long get_user_pages(unsigned long start, unsigned long nr_pages, 2698 unsigned int gup_flags, struct page **pages) 2699 { 2700 int locked = 1; 2701 2702 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) 2703 return -EINVAL; 2704 2705 return __get_user_pages_locked(current->mm, start, nr_pages, pages, 2706 &locked, gup_flags); 2707 } 2708 EXPORT_SYMBOL(get_user_pages); 2709 2710 /* 2711 * get_user_pages_unlocked() is suitable to replace the form: 2712 * 2713 * mmap_read_lock(mm); 2714 * get_user_pages(mm, ..., pages, NULL); 2715 * mmap_read_unlock(mm); 2716 * 2717 * with: 2718 * 2719 * get_user_pages_unlocked(mm, ..., pages); 2720 * 2721 * It is functionally equivalent to get_user_pages_fast so 2722 * get_user_pages_fast should be used instead if specific gup_flags 2723 * (e.g. FOLL_FORCE) are not required. 2724 */ 2725 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 2726 struct page **pages, unsigned int gup_flags) 2727 { 2728 int locked = 0; 2729 2730 if (!is_valid_gup_args(pages, NULL, &gup_flags, 2731 FOLL_TOUCH | FOLL_UNLOCKABLE)) 2732 return -EINVAL; 2733 2734 return __get_user_pages_locked(current->mm, start, nr_pages, pages, 2735 &locked, gup_flags); 2736 } 2737 EXPORT_SYMBOL(get_user_pages_unlocked); 2738 2739 /* 2740 * GUP-fast 2741 * 2742 * get_user_pages_fast attempts to pin user pages by walking the page 2743 * tables directly and avoids taking locks. Thus the walker needs to be 2744 * protected from page table pages being freed from under it, and should 2745 * block any THP splits. 2746 * 2747 * One way to achieve this is to have the walker disable interrupts, and 2748 * rely on IPIs from the TLB flushing code blocking before the page table 2749 * pages are freed. 
This is unsuitable for architectures that do not need 2750 * to broadcast an IPI when invalidating TLBs. 2751 * 2752 * Another way to achieve this is to batch up page table containing pages 2753 * belonging to more than one mm_user, then rcu_sched a callback to free those 2754 * pages. Disabling interrupts will allow the gup_fast() walker to both block 2755 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs 2756 * (which is a relatively rare event). The code below adopts this strategy. 2757 * 2758 * Before activating this code, please be aware that the following assumptions 2759 * are currently made: 2760 * 2761 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to 2762 * free pages containing page tables or TLB flushing requires IPI broadcast. 2763 * 2764 * *) ptes can be read atomically by the architecture. 2765 * 2766 * *) access_ok is sufficient to validate userspace address ranges. 2767 * 2768 * The last two assumptions can be relaxed by the addition of helper functions. 2769 * 2770 * This code is based heavily on the PowerPC implementation by Nick Piggin. 2771 */ 2772 #ifdef CONFIG_HAVE_GUP_FAST 2773 /* 2774 * Used in the GUP-fast path to determine whether GUP is permitted to work on 2775 * a specific folio. 2776 * 2777 * This call assumes the caller has pinned the folio, that the lowest page table 2778 * level still points to this folio, and that interrupts have been disabled. 2779 * 2780 * GUP-fast must reject all secretmem folios. 2781 * 2782 * Writing to pinned file-backed dirty tracked folios is inherently problematic 2783 * (see comment describing the writable_file_mapping_allowed() function). We 2784 * therefore try to avoid the most egregious case of a long-term mapping doing 2785 * so. 2786 * 2787 * This function cannot be as thorough as that one as the VMA is not available 2788 * in the fast path, so instead we whitelist known good cases and if in doubt, 2789 * fall back to the slow path. 2790 */ 2791 static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags) 2792 { 2793 bool reject_file_backed = false; 2794 struct address_space *mapping; 2795 bool check_secretmem = false; 2796 unsigned long mapping_flags; 2797 2798 /* 2799 * If we aren't pinning then no problematic write can occur. A long term 2800 * pin is the most egregious case so this is the one we disallow. 2801 */ 2802 if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) == 2803 (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) 2804 reject_file_backed = true; 2805 2806 /* We hold a folio reference, so we can safely access folio fields. */ 2807 2808 /* secretmem folios are always order-0 folios. */ 2809 if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio)) 2810 check_secretmem = true; 2811 2812 if (!reject_file_backed && !check_secretmem) 2813 return true; 2814 2815 if (WARN_ON_ONCE(folio_test_slab(folio))) 2816 return false; 2817 2818 /* hugetlb neither requires dirty-tracking nor can be secretmem. */ 2819 if (folio_test_hugetlb(folio)) 2820 return true; 2821 2822 /* 2823 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods 2824 * cannot proceed, which means no actions performed under RCU can 2825 * proceed either. 2826 * 2827 * inodes and thus their mappings are freed under RCU, which means the 2828 * mapping cannot be freed beneath us and thus we can safely dereference 2829 * it. 
2830 */ 2831 lockdep_assert_irqs_disabled(); 2832 2833 /* 2834 * However, there may be operations which _alter_ the mapping, so ensure 2835 * we read it once and only once. 2836 */ 2837 mapping = READ_ONCE(folio->mapping); 2838 2839 /* 2840 * The mapping may have been truncated, in any case we cannot determine 2841 * if this mapping is safe - fall back to slow path to determine how to 2842 * proceed. 2843 */ 2844 if (!mapping) 2845 return false; 2846 2847 /* Anonymous folios pose no problem. */ 2848 mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS; 2849 if (mapping_flags) 2850 return mapping_flags & PAGE_MAPPING_ANON; 2851 2852 /* 2853 * At this point, we know the mapping is non-null and points to an 2854 * address_space object. 2855 */ 2856 if (check_secretmem && secretmem_mapping(mapping)) 2857 return false; 2858 /* The only remaining allowed file system is shmem. */ 2859 return !reject_file_backed || shmem_mapping(mapping); 2860 } 2861 2862 static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start, 2863 unsigned int flags, struct page **pages) 2864 { 2865 while ((*nr) - nr_start) { 2866 struct folio *folio = page_folio(pages[--(*nr)]); 2867 2868 folio_clear_referenced(folio); 2869 gup_put_folio(folio, 1, flags); 2870 } 2871 } 2872 2873 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL 2874 /* 2875 * GUP-fast relies on pte change detection to avoid concurrent pgtable 2876 * operations. 2877 * 2878 * To pin the page, GUP-fast needs to do below in order: 2879 * (1) pin the page (by prefetching pte), then (2) check pte not changed. 2880 * 2881 * For the rest of pgtable operations where pgtable updates can be racy 2882 * with GUP-fast, we need to do (1) clear pte, then (2) check whether page 2883 * is pinned. 2884 * 2885 * Above will work for all pte-level operations, including THP split. 2886 * 2887 * For THP collapse, it's a bit more complicated because GUP-fast may be 2888 * walking a pgtable page that is being freed (pte is still valid but pmd 2889 * can be cleared already). To avoid race in such condition, we need to 2890 * also check pmd here to make sure pmd doesn't change (corresponds to 2891 * pmdp_collapse_flush() in the THP collapse code path). 2892 */ 2893 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, 2894 unsigned long end, unsigned int flags, struct page **pages, 2895 int *nr) 2896 { 2897 struct dev_pagemap *pgmap = NULL; 2898 int nr_start = *nr, ret = 0; 2899 pte_t *ptep, *ptem; 2900 2901 ptem = ptep = pte_offset_map(&pmd, addr); 2902 if (!ptep) 2903 return 0; 2904 do { 2905 pte_t pte = ptep_get_lockless(ptep); 2906 struct page *page; 2907 struct folio *folio; 2908 2909 /* 2910 * Always fallback to ordinary GUP on PROT_NONE-mapped pages: 2911 * pte_access_permitted() better should reject these pages 2912 * either way: otherwise, GUP-fast might succeed in 2913 * cases where ordinary GUP would fail due to VMA access 2914 * permissions. 
2915 */ 2916 if (pte_protnone(pte)) 2917 goto pte_unmap; 2918 2919 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 2920 goto pte_unmap; 2921 2922 if (pte_devmap(pte)) { 2923 if (unlikely(flags & FOLL_LONGTERM)) 2924 goto pte_unmap; 2925 2926 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); 2927 if (unlikely(!pgmap)) { 2928 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); 2929 goto pte_unmap; 2930 } 2931 } else if (pte_special(pte)) 2932 goto pte_unmap; 2933 2934 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 2935 page = pte_page(pte); 2936 2937 folio = try_grab_folio_fast(page, 1, flags); 2938 if (!folio) 2939 goto pte_unmap; 2940 2941 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || 2942 unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { 2943 gup_put_folio(folio, 1, flags); 2944 goto pte_unmap; 2945 } 2946 2947 if (!gup_fast_folio_allowed(folio, flags)) { 2948 gup_put_folio(folio, 1, flags); 2949 goto pte_unmap; 2950 } 2951 2952 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { 2953 gup_put_folio(folio, 1, flags); 2954 goto pte_unmap; 2955 } 2956 2957 /* 2958 * We need to make the page accessible if and only if we are 2959 * going to access its content (the FOLL_PIN case). Please 2960 * see Documentation/core-api/pin_user_pages.rst for 2961 * details. 2962 */ 2963 if (flags & FOLL_PIN) { 2964 ret = arch_make_folio_accessible(folio); 2965 if (ret) { 2966 gup_put_folio(folio, 1, flags); 2967 goto pte_unmap; 2968 } 2969 } 2970 folio_set_referenced(folio); 2971 pages[*nr] = page; 2972 (*nr)++; 2973 } while (ptep++, addr += PAGE_SIZE, addr != end); 2974 2975 ret = 1; 2976 2977 pte_unmap: 2978 if (pgmap) 2979 put_dev_pagemap(pgmap); 2980 pte_unmap(ptem); 2981 return ret; 2982 } 2983 #else 2984 2985 /* 2986 * If we can't determine whether or not a pte is special, then fail immediately 2987 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not 2988 * to be special. 2989 * 2990 * For a futex to be placed on a THP tail page, get_futex_key requires a 2991 * get_user_pages_fast_only implementation that can pin pages. Thus it's still 2992 * useful to have gup_fast_pmd_leaf even if we can't operate on ptes. 
2993 */ 2994 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, 2995 unsigned long end, unsigned int flags, struct page **pages, 2996 int *nr) 2997 { 2998 return 0; 2999 } 3000 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ 3001 3002 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 3003 static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr, 3004 unsigned long end, unsigned int flags, struct page **pages, int *nr) 3005 { 3006 int nr_start = *nr; 3007 struct dev_pagemap *pgmap = NULL; 3008 3009 do { 3010 struct folio *folio; 3011 struct page *page = pfn_to_page(pfn); 3012 3013 pgmap = get_dev_pagemap(pfn, pgmap); 3014 if (unlikely(!pgmap)) { 3015 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); 3016 break; 3017 } 3018 3019 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { 3020 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); 3021 break; 3022 } 3023 3024 folio = try_grab_folio_fast(page, 1, flags); 3025 if (!folio) { 3026 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); 3027 break; 3028 } 3029 folio_set_referenced(folio); 3030 pages[*nr] = page; 3031 (*nr)++; 3032 pfn++; 3033 } while (addr += PAGE_SIZE, addr != end); 3034 3035 put_dev_pagemap(pgmap); 3036 return addr == end; 3037 } 3038 3039 static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, 3040 unsigned long end, unsigned int flags, struct page **pages, 3041 int *nr) 3042 { 3043 unsigned long fault_pfn; 3044 int nr_start = *nr; 3045 3046 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); 3047 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) 3048 return 0; 3049 3050 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 3051 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); 3052 return 0; 3053 } 3054 return 1; 3055 } 3056 3057 static int gup_fast_devmap_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, 3058 unsigned long end, unsigned int flags, struct page **pages, 3059 int *nr) 3060 { 3061 unsigned long fault_pfn; 3062 int nr_start = *nr; 3063 3064 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); 3065 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) 3066 return 0; 3067 3068 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 3069 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); 3070 return 0; 3071 } 3072 return 1; 3073 } 3074 #else 3075 static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, 3076 unsigned long end, unsigned int flags, struct page **pages, 3077 int *nr) 3078 { 3079 BUILD_BUG(); 3080 return 0; 3081 } 3082 3083 static int gup_fast_devmap_pud_leaf(pud_t pud, pud_t *pudp, unsigned long addr, 3084 unsigned long end, unsigned int flags, struct page **pages, 3085 int *nr) 3086 { 3087 BUILD_BUG(); 3088 return 0; 3089 } 3090 #endif 3091 3092 static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, 3093 unsigned long end, unsigned int flags, struct page **pages, 3094 int *nr) 3095 { 3096 struct page *page; 3097 struct folio *folio; 3098 int refs; 3099 3100 if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) 3101 return 0; 3102 3103 if (pmd_special(orig)) 3104 return 0; 3105 3106 if (pmd_devmap(orig)) { 3107 if (unlikely(flags & FOLL_LONGTERM)) 3108 return 0; 3109 return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags, 3110 pages, nr); 3111 } 3112 3113 page = pmd_page(orig); 3114 refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); 3115 3116 folio = try_grab_folio_fast(page, refs, 
flags); 3117 if (!folio) 3118 return 0; 3119 3120 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 3121 gup_put_folio(folio, refs, flags); 3122 return 0; 3123 } 3124 3125 if (!gup_fast_folio_allowed(folio, flags)) { 3126 gup_put_folio(folio, refs, flags); 3127 return 0; 3128 } 3129 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 3130 gup_put_folio(folio, refs, flags); 3131 return 0; 3132 } 3133 3134 *nr += refs; 3135 folio_set_referenced(folio); 3136 return 1; 3137 } 3138 3139 static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, 3140 unsigned long end, unsigned int flags, struct page **pages, 3141 int *nr) 3142 { 3143 struct page *page; 3144 struct folio *folio; 3145 int refs; 3146 3147 if (!pud_access_permitted(orig, flags & FOLL_WRITE)) 3148 return 0; 3149 3150 if (pud_special(orig)) 3151 return 0; 3152 3153 if (pud_devmap(orig)) { 3154 if (unlikely(flags & FOLL_LONGTERM)) 3155 return 0; 3156 return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags, 3157 pages, nr); 3158 } 3159 3160 page = pud_page(orig); 3161 refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); 3162 3163 folio = try_grab_folio_fast(page, refs, flags); 3164 if (!folio) 3165 return 0; 3166 3167 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 3168 gup_put_folio(folio, refs, flags); 3169 return 0; 3170 } 3171 3172 if (!gup_fast_folio_allowed(folio, flags)) { 3173 gup_put_folio(folio, refs, flags); 3174 return 0; 3175 } 3176 3177 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 3178 gup_put_folio(folio, refs, flags); 3179 return 0; 3180 } 3181 3182 *nr += refs; 3183 folio_set_referenced(folio); 3184 return 1; 3185 } 3186 3187 static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr, 3188 unsigned long end, unsigned int flags, struct page **pages, 3189 int *nr) 3190 { 3191 int refs; 3192 struct page *page; 3193 struct folio *folio; 3194 3195 if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) 3196 return 0; 3197 3198 BUILD_BUG_ON(pgd_devmap(orig)); 3199 3200 page = pgd_page(orig); 3201 refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr); 3202 3203 folio = try_grab_folio_fast(page, refs, flags); 3204 if (!folio) 3205 return 0; 3206 3207 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { 3208 gup_put_folio(folio, refs, flags); 3209 return 0; 3210 } 3211 3212 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 3213 gup_put_folio(folio, refs, flags); 3214 return 0; 3215 } 3216 3217 if (!gup_fast_folio_allowed(folio, flags)) { 3218 gup_put_folio(folio, refs, flags); 3219 return 0; 3220 } 3221 3222 *nr += refs; 3223 folio_set_referenced(folio); 3224 return 1; 3225 } 3226 3227 static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, 3228 unsigned long end, unsigned int flags, struct page **pages, 3229 int *nr) 3230 { 3231 unsigned long next; 3232 pmd_t *pmdp; 3233 3234 pmdp = pmd_offset_lockless(pudp, pud, addr); 3235 do { 3236 pmd_t pmd = pmdp_get_lockless(pmdp); 3237 3238 next = pmd_addr_end(addr, end); 3239 if (!pmd_present(pmd)) 3240 return 0; 3241 3242 if (unlikely(pmd_leaf(pmd))) { 3243 /* See gup_fast_pte_range() */ 3244 if (pmd_protnone(pmd)) 3245 return 0; 3246 3247 if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags, 3248 pages, nr)) 3249 return 0; 3250 3251 } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags, 3252 pages, nr)) 3253 return 0; 3254 } while (pmdp++, addr = next, addr != end); 3255 3256 return 1; 3257 } 3258 3259 static int gup_fast_pud_range(p4d_t *p4dp, 
p4d_t p4d, unsigned long addr, 3260 unsigned long end, unsigned int flags, struct page **pages, 3261 int *nr) 3262 { 3263 unsigned long next; 3264 pud_t *pudp; 3265 3266 pudp = pud_offset_lockless(p4dp, p4d, addr); 3267 do { 3268 pud_t pud = READ_ONCE(*pudp); 3269 3270 next = pud_addr_end(addr, end); 3271 if (unlikely(!pud_present(pud))) 3272 return 0; 3273 if (unlikely(pud_leaf(pud))) { 3274 if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags, 3275 pages, nr)) 3276 return 0; 3277 } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags, 3278 pages, nr)) 3279 return 0; 3280 } while (pudp++, addr = next, addr != end); 3281 3282 return 1; 3283 } 3284 3285 static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, 3286 unsigned long end, unsigned int flags, struct page **pages, 3287 int *nr) 3288 { 3289 unsigned long next; 3290 p4d_t *p4dp; 3291 3292 p4dp = p4d_offset_lockless(pgdp, pgd, addr); 3293 do { 3294 p4d_t p4d = READ_ONCE(*p4dp); 3295 3296 next = p4d_addr_end(addr, end); 3297 if (!p4d_present(p4d)) 3298 return 0; 3299 BUILD_BUG_ON(p4d_leaf(p4d)); 3300 if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags, 3301 pages, nr)) 3302 return 0; 3303 } while (p4dp++, addr = next, addr != end); 3304 3305 return 1; 3306 } 3307 3308 static void gup_fast_pgd_range(unsigned long addr, unsigned long end, 3309 unsigned int flags, struct page **pages, int *nr) 3310 { 3311 unsigned long next; 3312 pgd_t *pgdp; 3313 3314 pgdp = pgd_offset(current->mm, addr); 3315 do { 3316 pgd_t pgd = READ_ONCE(*pgdp); 3317 3318 next = pgd_addr_end(addr, end); 3319 if (pgd_none(pgd)) 3320 return; 3321 if (unlikely(pgd_leaf(pgd))) { 3322 if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags, 3323 pages, nr)) 3324 return; 3325 } else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags, 3326 pages, nr)) 3327 return; 3328 } while (pgdp++, addr = next, addr != end); 3329 } 3330 #else 3331 static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end, 3332 unsigned int flags, struct page **pages, int *nr) 3333 { 3334 } 3335 #endif /* CONFIG_HAVE_GUP_FAST */ 3336 3337 #ifndef gup_fast_permitted 3338 /* 3339 * Check if it's allowed to use get_user_pages_fast_only() for the range, or 3340 * we need to fall back to the slow version: 3341 */ 3342 static bool gup_fast_permitted(unsigned long start, unsigned long end) 3343 { 3344 return true; 3345 } 3346 #endif 3347 3348 static unsigned long gup_fast(unsigned long start, unsigned long end, 3349 unsigned int gup_flags, struct page **pages) 3350 { 3351 unsigned long flags; 3352 int nr_pinned = 0; 3353 unsigned seq; 3354 3355 if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) || 3356 !gup_fast_permitted(start, end)) 3357 return 0; 3358 3359 if (gup_flags & FOLL_PIN) { 3360 if (!raw_seqcount_try_begin(¤t->mm->write_protect_seq, seq)) 3361 return 0; 3362 } 3363 3364 /* 3365 * Disable interrupts. The nested form is used, in order to allow full, 3366 * general purpose use of this routine. 3367 * 3368 * With interrupts disabled, we block page table pages from being freed 3369 * from under us. See struct mmu_table_batch comments in 3370 * include/asm-generic/tlb.h for more details. 3371 * 3372 * We do not adopt an rcu_read_lock() here as we also want to block IPIs 3373 * that come from THPs splitting. 
3374 */ 3375 local_irq_save(flags); 3376 gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned); 3377 local_irq_restore(flags); 3378 3379 /* 3380 * When pinning pages for DMA there could be a concurrent write protect 3381 * from fork() via copy_page_range(), in this case always fail GUP-fast. 3382 */ 3383 if (gup_flags & FOLL_PIN) { 3384 if (read_seqcount_retry(¤t->mm->write_protect_seq, seq)) { 3385 gup_fast_unpin_user_pages(pages, nr_pinned); 3386 return 0; 3387 } else { 3388 sanity_check_pinned_pages(pages, nr_pinned); 3389 } 3390 } 3391 return nr_pinned; 3392 } 3393 3394 static int gup_fast_fallback(unsigned long start, unsigned long nr_pages, 3395 unsigned int gup_flags, struct page **pages) 3396 { 3397 unsigned long len, end; 3398 unsigned long nr_pinned; 3399 int locked = 0; 3400 int ret; 3401 3402 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | 3403 FOLL_FORCE | FOLL_PIN | FOLL_GET | 3404 FOLL_FAST_ONLY | FOLL_NOFAULT | 3405 FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT))) 3406 return -EINVAL; 3407 3408 if (gup_flags & FOLL_PIN) 3409 mm_set_has_pinned_flag(¤t->mm->flags); 3410 3411 if (!(gup_flags & FOLL_FAST_ONLY)) 3412 might_lock_read(¤t->mm->mmap_lock); 3413 3414 start = untagged_addr(start) & PAGE_MASK; 3415 len = nr_pages << PAGE_SHIFT; 3416 if (check_add_overflow(start, len, &end)) 3417 return -EOVERFLOW; 3418 if (end > TASK_SIZE_MAX) 3419 return -EFAULT; 3420 if (unlikely(!access_ok((void __user *)start, len))) 3421 return -EFAULT; 3422 3423 nr_pinned = gup_fast(start, end, gup_flags, pages); 3424 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) 3425 return nr_pinned; 3426 3427 /* Slow path: try to get the remaining pages with get_user_pages */ 3428 start += nr_pinned << PAGE_SHIFT; 3429 pages += nr_pinned; 3430 ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, 3431 pages, &locked, 3432 gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); 3433 if (ret < 0) { 3434 /* 3435 * The caller has to unpin the pages we already pinned so 3436 * returning -errno is not an option 3437 */ 3438 if (nr_pinned) 3439 return nr_pinned; 3440 return ret; 3441 } 3442 return ret + nr_pinned; 3443 } 3444 3445 /** 3446 * get_user_pages_fast_only() - pin user pages in memory 3447 * @start: starting user address 3448 * @nr_pages: number of pages from start to pin 3449 * @gup_flags: flags modifying pin behaviour 3450 * @pages: array that receives pointers to the pages pinned. 3451 * Should be at least nr_pages long. 3452 * 3453 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to 3454 * the regular GUP. 3455 * 3456 * If the architecture does not support this function, simply return with no 3457 * pages pinned. 3458 * 3459 * Careful, careful! COW breaking can go either way, so a non-write 3460 * access can get ambiguous page results. If you call this function without 3461 * 'write' set, you'd better be sure that you're ok with that ambiguity. 3462 */ 3463 int get_user_pages_fast_only(unsigned long start, int nr_pages, 3464 unsigned int gup_flags, struct page **pages) 3465 { 3466 /* 3467 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, 3468 * because gup fast is always a "pin with a +1 page refcount" request. 3469 * 3470 * FOLL_FAST_ONLY is required in order to match the API description of 3471 * this routine: no fall back to regular ("slow") GUP. 
3472 */ 3473 if (!is_valid_gup_args(pages, NULL, &gup_flags, 3474 FOLL_GET | FOLL_FAST_ONLY)) 3475 return -EINVAL; 3476 3477 return gup_fast_fallback(start, nr_pages, gup_flags, pages); 3478 } 3479 EXPORT_SYMBOL_GPL(get_user_pages_fast_only); 3480 3481 /** 3482 * get_user_pages_fast() - pin user pages in memory 3483 * @start: starting user address 3484 * @nr_pages: number of pages from start to pin 3485 * @gup_flags: flags modifying pin behaviour 3486 * @pages: array that receives pointers to the pages pinned. 3487 * Should be at least nr_pages long. 3488 * 3489 * Attempt to pin user pages in memory without taking mm->mmap_lock. 3490 * If not successful, it will fall back to taking the lock and 3491 * calling get_user_pages(). 3492 * 3493 * Returns number of pages pinned. This may be fewer than the number requested. 3494 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns 3495 * -errno. 3496 */ 3497 int get_user_pages_fast(unsigned long start, int nr_pages, 3498 unsigned int gup_flags, struct page **pages) 3499 { 3500 /* 3501 * The caller may or may not have explicitly set FOLL_GET; either way is 3502 * OK. However, internally (within mm/gup.c), gup fast variants must set 3503 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" 3504 * request. 3505 */ 3506 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) 3507 return -EINVAL; 3508 return gup_fast_fallback(start, nr_pages, gup_flags, pages); 3509 } 3510 EXPORT_SYMBOL_GPL(get_user_pages_fast); 3511 3512 /** 3513 * pin_user_pages_fast() - pin user pages in memory without taking locks 3514 * 3515 * @start: starting user address 3516 * @nr_pages: number of pages from start to pin 3517 * @gup_flags: flags modifying pin behaviour 3518 * @pages: array that receives pointers to the pages pinned. 3519 * Should be at least nr_pages long. 3520 * 3521 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See 3522 * get_user_pages_fast() for documentation on the function arguments, because 3523 * the arguments here are identical. 3524 * 3525 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3526 * see Documentation/core-api/pin_user_pages.rst for further details. 3527 * 3528 * Note that if a zero_page is amongst the returned pages, it will not have 3529 * pins in it and unpin_user_page() will not remove pins from it. 3530 */ 3531 int pin_user_pages_fast(unsigned long start, int nr_pages, 3532 unsigned int gup_flags, struct page **pages) 3533 { 3534 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) 3535 return -EINVAL; 3536 return gup_fast_fallback(start, nr_pages, gup_flags, pages); 3537 } 3538 EXPORT_SYMBOL_GPL(pin_user_pages_fast); 3539 3540 /** 3541 * pin_user_pages_remote() - pin pages of a remote process 3542 * 3543 * @mm: mm_struct of target mm 3544 * @start: starting user address 3545 * @nr_pages: number of pages from start to pin 3546 * @gup_flags: flags modifying lookup behaviour 3547 * @pages: array that receives pointers to the pages pinned. 3548 * Should be at least nr_pages long. 3549 * @locked: pointer to lock flag indicating whether lock is held and 3550 * subsequently whether VM_FAULT_RETRY functionality can be 3551 * utilised. Lock must initially be held. 3552 * 3553 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See 3554 * get_user_pages_remote() for documentation on the function arguments, because 3555 * the arguments here are identical. 
3556 * 3557 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3558 * see Documentation/core-api/pin_user_pages.rst for details. 3559 * 3560 * Note that if a zero_page is amongst the returned pages, it will not have 3561 * pins in it and unpin_user_page*() will not remove pins from it. 3562 */ 3563 long pin_user_pages_remote(struct mm_struct *mm, 3564 unsigned long start, unsigned long nr_pages, 3565 unsigned int gup_flags, struct page **pages, 3566 int *locked) 3567 { 3568 int local_locked = 1; 3569 3570 if (!is_valid_gup_args(pages, locked, &gup_flags, 3571 FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) 3572 return 0; 3573 return __gup_longterm_locked(mm, start, nr_pages, pages, 3574 locked ? locked : &local_locked, 3575 gup_flags); 3576 } 3577 EXPORT_SYMBOL(pin_user_pages_remote); 3578 3579 /** 3580 * pin_user_pages() - pin user pages in memory for use by other devices 3581 * 3582 * @start: starting user address 3583 * @nr_pages: number of pages from start to pin 3584 * @gup_flags: flags modifying lookup behaviour 3585 * @pages: array that receives pointers to the pages pinned. 3586 * Should be at least nr_pages long. 3587 * 3588 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and 3589 * FOLL_PIN is set. 3590 * 3591 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3592 * see Documentation/core-api/pin_user_pages.rst for details. 3593 * 3594 * Note that if a zero_page is amongst the returned pages, it will not have 3595 * pins in it and unpin_user_page*() will not remove pins from it. 3596 */ 3597 long pin_user_pages(unsigned long start, unsigned long nr_pages, 3598 unsigned int gup_flags, struct page **pages) 3599 { 3600 int locked = 1; 3601 3602 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) 3603 return 0; 3604 return __gup_longterm_locked(current->mm, start, nr_pages, 3605 pages, &locked, gup_flags); 3606 } 3607 EXPORT_SYMBOL(pin_user_pages); 3608 3609 /* 3610 * pin_user_pages_unlocked() is the FOLL_PIN variant of 3611 * get_user_pages_unlocked(). Behavior is the same, except that this one sets 3612 * FOLL_PIN and rejects FOLL_GET. 3613 * 3614 * Note that if a zero_page is amongst the returned pages, it will not have 3615 * pins in it and unpin_user_page*() will not remove pins from it. 3616 */ 3617 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 3618 struct page **pages, unsigned int gup_flags) 3619 { 3620 int locked = 0; 3621 3622 if (!is_valid_gup_args(pages, NULL, &gup_flags, 3623 FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) 3624 return 0; 3625 3626 return __gup_longterm_locked(current->mm, start, nr_pages, pages, 3627 &locked, gup_flags); 3628 } 3629 EXPORT_SYMBOL(pin_user_pages_unlocked); 3630 3631 /** 3632 * memfd_pin_folios() - pin folios associated with a memfd 3633 * @memfd: the memfd whose folios are to be pinned 3634 * @start: the first memfd offset 3635 * @end: the last memfd offset (inclusive) 3636 * @folios: array that receives pointers to the folios pinned 3637 * @max_folios: maximum number of entries in @folios 3638 * @offset: the offset into the first folio 3639 * 3640 * Attempt to pin folios associated with a memfd in the contiguous range 3641 * [start, end]. Given that a memfd is either backed by shmem or hugetlb, 3642 * the folios can either be found in the page cache or need to be allocated 3643 * if necessary. Once the folios are located, they are all pinned via 3644 * FOLL_PIN and @offset is populated with the offset into the first folio.
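 *
 * A rough usage sketch (illustrative only; "memfd" and "size" are hypothetical
 * caller state):
 *
 *	struct folio *folios[64];
 *	pgoff_t first_offset;
 *	long nr;
 *
 *	nr = memfd_pin_folios(memfd, 0, size - 1, folios, 64, &first_offset);
 *	if (nr < 0)
 *		return nr;
 *	...use the folios, e.g. hand them to a device...
 *	unpin_folios(folios, nr);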
3645 * And, eventually, these pinned folios must be released either using 3646 * unpin_folios() or unpin_folio(). 3647 * 3648 * It must be noted that the folios may be pinned for an indefinite amount 3649 * of time. And, in most cases, the duration of time they may stay pinned 3650 * would be controlled by the userspace. This behavior is effectively the 3651 * same as using FOLL_LONGTERM with other GUP APIs. 3652 * 3653 * Returns number of folios pinned, which could be less than @max_folios 3654 * as it depends on the folio sizes that cover the range [start, end]. 3655 * If no folios were pinned, it returns -errno. 3656 */ 3657 long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end, 3658 struct folio **folios, unsigned int max_folios, 3659 pgoff_t *offset) 3660 { 3661 unsigned int flags, nr_folios, nr_found; 3662 unsigned int i, pgshift = PAGE_SHIFT; 3663 pgoff_t start_idx, end_idx, next_idx; 3664 struct folio *folio = NULL; 3665 struct folio_batch fbatch; 3666 struct hstate *h; 3667 long ret = -EINVAL; 3668 3669 if (start < 0 || start > end || !max_folios) 3670 return -EINVAL; 3671 3672 if (!memfd) 3673 return -EINVAL; 3674 3675 if (!shmem_file(memfd) && !is_file_hugepages(memfd)) 3676 return -EINVAL; 3677 3678 if (end >= i_size_read(file_inode(memfd))) 3679 return -EINVAL; 3680 3681 if (is_file_hugepages(memfd)) { 3682 h = hstate_file(memfd); 3683 pgshift = huge_page_shift(h); 3684 } 3685 3686 flags = memalloc_pin_save(); 3687 do { 3688 nr_folios = 0; 3689 start_idx = start >> pgshift; 3690 end_idx = end >> pgshift; 3691 if (is_file_hugepages(memfd)) { 3692 start_idx <<= huge_page_order(h); 3693 end_idx <<= huge_page_order(h); 3694 } 3695 3696 folio_batch_init(&fbatch); 3697 while (start_idx <= end_idx && nr_folios < max_folios) { 3698 /* 3699 * In most cases, we should be able to find the folios 3700 * in the page cache. If we cannot find them for some 3701 * reason, we try to allocate them and add them to the 3702 * page cache. 3703 */ 3704 nr_found = filemap_get_folios_contig(memfd->f_mapping, 3705 &start_idx, 3706 end_idx, 3707 &fbatch); 3708 if (folio) { 3709 folio_put(folio); 3710 folio = NULL; 3711 } 3712 3713 next_idx = 0; 3714 for (i = 0; i < nr_found; i++) { 3715 /* 3716 * As there can be multiple entries for a 3717 * given folio in the batch returned by 3718 * filemap_get_folios_contig(), the below 3719 * check is to ensure that we pin and return a 3720 * unique set of folios between start and end. 3721 */ 3722 if (next_idx && 3723 next_idx != folio_index(fbatch.folios[i])) 3724 continue; 3725 3726 folio = page_folio(&fbatch.folios[i]->page); 3727 3728 if (try_grab_folio(folio, 1, FOLL_PIN)) { 3729 folio_batch_release(&fbatch); 3730 ret = -EINVAL; 3731 goto err; 3732 } 3733 3734 if (nr_folios == 0) 3735 *offset = offset_in_folio(folio, start); 3736 3737 folios[nr_folios] = folio; 3738 next_idx = folio_next_index(folio); 3739 if (++nr_folios == max_folios) 3740 break; 3741 } 3742 3743 folio = NULL; 3744 folio_batch_release(&fbatch); 3745 if (!nr_found) { 3746 folio = memfd_alloc_folio(memfd, start_idx); 3747 if (IS_ERR(folio)) { 3748 ret = PTR_ERR(folio); 3749 if (ret != -EEXIST) 3750 goto err; 3751 folio = NULL; 3752 } 3753 } 3754 } 3755 3756 ret = check_and_migrate_movable_folios(nr_folios, folios); 3757 } while (ret == -EAGAIN); 3758 3759 memalloc_pin_restore(flags); 3760 return ret ? 
ret : nr_folios; 3761 err: 3762 memalloc_pin_restore(flags); 3763 unpin_folios(folios, nr_folios); 3764 3765 return ret; 3766 } 3767 EXPORT_SYMBOL_GPL(memfd_pin_folios); 3768 3769 /** 3770 * folio_add_pins() - add pins to an already-pinned folio 3771 * @folio: the folio to add more pins to 3772 * @pins: number of pins to add 3773 * 3774 * Try to add more pins to an already-pinned folio. The semantics 3775 * of the pin (e.g., FOLL_WRITE) follow any existing pin and cannot 3776 * be changed. 3777 * 3778 * This function is helpful when having obtained a pin on a large folio 3779 * using memfd_pin_folios(), but wanting to logically unpin parts 3780 * (e.g., individual pages) of the folio later, for example, using 3781 * unpin_user_page_range_dirty_lock(). 3782 * 3783 * This is not the right interface to initially pin a folio. 3784 */ 3785 int folio_add_pins(struct folio *folio, unsigned int pins) 3786 { 3787 VM_WARN_ON_ONCE(!folio_maybe_dma_pinned(folio)); 3788 3789 return try_grab_folio(folio, pins, FOLL_PIN); 3790 } 3791 EXPORT_SYMBOL_GPL(folio_add_pins); 3792
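/*
 * A rough end-to-end sketch of the FOLL_PIN lifecycle provided by this file
 * (illustrative only; "uaddr" and the DMA step are hypothetical, and
 * Documentation/core-api/pin_user_pages.rst remains the authoritative guide):
 *
 *	struct page *pages[16];
 *	int nr;
 *
 *	nr = pin_user_pages_fast(uaddr, 16, FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (nr <= 0)
 *		return nr ? nr : -EFAULT;
 *
 *	...set up and run DMA to the pinned pages...
 *
 *	unpin_user_pages_dirty_lock(pages, nr, true);
 */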