// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memfd.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"
#include "swap.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/*
	 * We only pin anonymous pages if they are exclusive. Once pinned, we
	 * can no longer turn them possibly shared and PageAnonExclusive() will
	 * stick around until the page is freed.
	 *
	 * We'd like to verify that our pinned anonymous pages are still mapped
	 * exclusively. The issue with anon THP is that we don't know how
	 * they are/were mapped when pinning them. However, for anon
	 * THP we can assume that either the given page (PTE-mapped THP) or
	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
	 * neither is the case, there is certainly something wrong.
	 */
	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio;

		if (!page)
			continue;

		folio = page_folio(page);

		if (is_zero_page(page) ||
		    !folio_test_anon(folio))
			continue;
		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
			VM_WARN_ON_ONCE_FOLIO(!PageAnonExclusive(&folio->page), folio);
		else
			/* Either a PTE-mapped or a PMD-mapped THP. */
			VM_WARN_ON_ONCE_PAGE(!PageAnonExclusive(&folio->page) &&
					     !PageAnonExclusive(page), page);
	}
}

/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
	struct folio *folio;

retry:
	folio = page_folio(page);
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add(folio, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the folio; but it
	 * could be that between calling page_folio() and the refcount
	 * increment, the folio was split, in which case we'd end up
	 * holding a reference on a folio that has nothing to do with the
	 * page we were given anymore.
	 * So now that the folio is stable, recheck that the page still
	 * belongs to this folio.
	 */
	if (unlikely(page_folio(page) != folio)) {
		folio_put_refs(folio, refs);
		goto retry;
	}

	return folio;
}
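/*
 * Illustrative timeline of the race that the recheck above guards
 * against (an explanatory sketch, not an additional code path):
 *
 *	CPU A					CPU B
 *	folio = page_folio(page);
 *						folio is split; @page now
 *						belongs to a different folio
 *	folio_ref_try_add(folio, refs);
 *	  (succeeds, but on the old folio)
 *	page_folio(page) != folio
 *	  -> drop the refs and retry
 */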
static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		if (is_zero_folio(folio))
			return;
		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
		if (folio_has_pincount(folio))
			atomic_sub(refs, &folio->_pincount);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	folio_put_refs(folio, refs);
}

/**
 * try_grab_folio() - add a folio's refcount by a flag-dependent amount
 * @folio: pointer to folio to be grabbed
 * @refs:  the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount".
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the
 * same time.
 *
 * Return: 0 for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). A negative error code for failure:
 *
 *   -ENOMEM	FOLL_GET or FOLL_PIN was set, but the folio could not
 *		be grabbed.
 *
 * It is called when we have a stable reference for the folio, typically in
 * the GUP slow path.
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags)
{
	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
		return -ENOMEM;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
		return -EREMOTEIO;

	if (flags & FOLL_GET)
		folio_ref_add(folio, refs);
	else if (flags & FOLL_PIN) {
		/*
		 * Don't take a pin on the zero page - it's not going anywhere
		 * and it is used in a *lot* of places.
		 */
		if (is_zero_folio(folio))
			return 0;

		/*
		 * Increment the normal page refcount field at least once,
		 * so that the page really is pinned.
		 */
		if (folio_has_pincount(folio)) {
			folio_ref_add(folio, refs);
			atomic_add(refs, &folio->_pincount);
		} else {
			folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
		}

		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
	}

	return 0;
}
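/*
 * Worked example of the two pinning schemes above (illustrative;
 * GUP_PIN_COUNTING_BIAS is defined in <linux/mm.h>):
 *
 *  - Folio without a separate pincount field, refcount 1, pinned once
 *    with @refs == 1: refcount becomes 1 + GUP_PIN_COUNTING_BIAS.
 *
 *  - Folio with a pincount field, refcount 1, pinned once with
 *    @refs == 1: refcount becomes 2, _pincount becomes 1.
 *
 * gup_put_folio() reverses exactly these steps, which is what allows
 * folio_maybe_dma_pinned() to make its (fuzzy, in the biased case)
 * judgement.
 */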
/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	sanity_check_pinned_pages(&page, 1);
	gup_put_folio(page_folio(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);

/**
 * unpin_folio() - release a dma-pinned folio
 * @folio: pointer to folio to be released
 *
 * Folios that were pinned via memfd_pin_folios() or other similar routines
 * must be released either using unpin_folio() or unpin_folios().
 */
void unpin_folio(struct folio *folio)
{
	gup_put_folio(folio, 1, FOLL_PIN);
}
EXPORT_SYMBOL_GPL(unpin_folio);

/**
 * folio_add_pin - Try to get an additional pin on a pinned folio
 * @folio: The folio to be pinned
 *
 * Get an additional pin on a folio we already have a pin on. Makes no change
 * if the folio is a zero_page.
 */
void folio_add_pin(struct folio *folio)
{
	if (is_zero_folio(folio))
		return;

	/*
	 * Similar to try_grab_folio(): be sure to *also* increment the normal
	 * page refcount field at least once, so that the page really is
	 * pinned.
	 */
	if (folio_has_pincount(folio)) {
		WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
		folio_ref_inc(folio);
		atomic_inc(&folio->_pincount);
	} else {
		WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
		folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
	}
}

static inline struct folio *gup_folio_range_next(struct page *start,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct page *next = nth_page(start, i);
	struct folio *folio = page_folio(next);
	unsigned int nr = 1;

	if (folio_test_large(folio))
		nr = min_t(unsigned int, npages - i,
			   folio_nr_pages(folio) - folio_page_idx(folio, next));

	*ntails = nr;
	return folio;
}

static inline struct folio *gup_folio_next(struct page **list,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct folio *folio = page_folio(list[i]);
	unsigned int nr;

	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio(list[nr]) != folio)
			break;
	}

	*ntails = nr - i;
	return folio;
}
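/*
 * Worked example for the two batching helpers above (illustrative):
 * with list[] = { A0, A1, B0 }, where A0/A1 sit in the same folio and
 * B0 in another, gup_folio_next(list, 3, 0, &nr) returns folio A with
 * nr == 2, and the next iteration (i == 2) returns folio B with
 * nr == 1. This lets the unpin loops below drop several page
 * references with a single gup_put_folio() call per folio.
 */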
/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * folio_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_range_next(page, npages, i, &nr);
		if (make_dirty && !folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
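/*
 * Typical caller pattern for the dirty-unpin helpers above (a sketch;
 * error handling elided, names illustrative):
 *
 *	nr = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
 *	if (nr <= 0)
 *		return nr;
 *	... hand the pages to a device, which DMAs into them ...
 *	unpin_user_pages_dirty_lock(pages, nr, true);
 *
 * Passing make_dirty == false degrades to plain unpin_user_pages(),
 * which is appropriate when the pages were only read from.
 */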
static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * Don't perform any sanity checks because we might have raced with
	 * fork() and some anonymous pages might now actually be shared --
	 * which is why we're unpinning after all.
	 */
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		if (!pages[i]) {
			nr = 1;
			continue;
		}
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages);

/**
 * unpin_user_folio() - release pages of a folio
 * @folio:  pointer to folio to be released
 * @npages: number of pages of the same folio
 *
 * Release npages of the folio.
 */
void unpin_user_folio(struct folio *folio, unsigned long npages)
{
	gup_put_folio(folio, npages, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_folio);

/**
 * unpin_folios() - release an array of gup-pinned folios.
 * @folios:  array of folios to be released.
 * @nfolios: number of folios in the @folios array.
 *
 * For each folio in the @folios array, release the folio using gup_put_folio.
 *
 * Please see the unpin_folio() documentation for details.
 */
void unpin_folios(struct folio **folios, unsigned long nfolios)
{
	unsigned long i = 0, j;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking folios
	 * (by leaving them pinned), but probably not. More likely, gup/pup
	 * returned a hard -ERRNO error to the caller, who erroneously passed
	 * it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(nfolios)))
		return;

	while (i < nfolios) {
		for (j = i + 1; j < nfolios; j++)
			if (folios[i] != folios[j])
				break;

		if (folios[i])
			gup_put_folio(folios[i], j - i, FOLL_PIN);
		i = j;
	}
}
EXPORT_SYMBOL_GPL(unpin_folios);

/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
	if (!test_bit(MMF_HAS_PINNED, mm_flags))
		set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU

#ifdef CONFIG_HAVE_GUP_FAST
static int record_subpages(struct page *page, unsigned long sz,
			   unsigned long addr, unsigned long end,
			   struct page **pages)
{
	struct page *start_page;
	int nr;

	start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
		pages[nr] = nth_page(start_page, nr);

	return nr;
}
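/*
 * Worked example for record_subpages() (illustrative numbers, 4K base
 * pages): for a 2M leaf whose first struct page is @page, recording
 * the two pages at addr == base + 0x3000 .. end == base + 0x5000 gives
 *
 *	start_page = nth_page(page, 3);
 *	pages[] = { page+3, page+4 }, return value 2.
 */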
/**
 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount".
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: folio's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on large folios: folio's refcount will be incremented by
 *    @refs, and its pincount will be incremented by @refs.
 *
 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
 *    @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: The folio containing @page (with refcount appropriately
 * incremented) for success, or NULL upon failure. If neither FOLL_GET
 * nor FOLL_PIN was set, that's considered failure, and furthermore,
 * a likely bug in the caller, so a warning is also emitted.
 *
 * It uses the add-ref-unless-zero primitive to elevate the folio refcount
 * and must only be called from the GUP fast path.
 */
static struct folio *try_grab_folio_fast(struct page *page, int refs,
					 unsigned int flags)
{
	struct folio *folio;

	/* Warn if this is not called from fast GUP (with IRQs disabled). */
	VM_WARN_ON_ONCE(!irqs_disabled());

	if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
		return NULL;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
		return NULL;

	if (flags & FOLL_GET)
		return try_get_folio(page, refs);

	/* FOLL_PIN is set */

	/*
	 * Don't take a pin on the zero page - it's not going anywhere
	 * and it is used in a *lot* of places.
	 */
	if (is_zero_page(page))
		return page_folio(page);

	folio = try_get_folio(page, refs);
	if (!folio)
		return NULL;

	/*
	 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
	 * right zone, so fail and let the caller fall back to the slow
	 * path.
	 */
	if (unlikely((flags & FOLL_LONGTERM) &&
		     !folio_is_longterm_pinnable(folio))) {
		folio_put_refs(folio, refs);
		return NULL;
	}

	/*
	 * When pinning a large folio, use an exact count to track it.
	 *
	 * However, be sure to *also* increment the normal folio
	 * refcount field at least once, so that the folio really
	 * is pinned. That's why the refcount from the earlier
	 * try_get_folio() is left intact.
	 */
	if (folio_has_pincount(folio))
		atomic_add(refs, &folio->_pincount);
	else
		folio_ref_add(folio,
			      refs * (GUP_PIN_COUNTING_BIAS - 1));
	/*
	 * Adjust the pincount before re-checking the PTE for changes.
	 * This is essentially a smp_mb() and is paired with a memory
	 * barrier in folio_try_share_anon_rmap_*().
	 */
	smp_mb__after_atomic();

	node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);

	return folio;
}
#endif	/* CONFIG_HAVE_GUP_FAST */

/* Common code for can_follow_write_* */
static inline bool can_follow_write_common(struct page *page,
		struct vm_area_struct *vma, unsigned int flags)
{
	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	return page && PageAnon(page) && PageAnonExclusive(page);
}
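/*
 * The classic consumer of this FOLL_FORCE path is ptrace writing a
 * breakpoint into a read-only private mapping of an executable. A
 * sketch of such a caller (illustrative, error handling elided):
 *
 *	ret = get_user_pages_remote(mm, addr, 1,
 *				    FOLL_WRITE | FOLL_FORCE, &page, NULL);
 *
 * The write is permitted only because the mapping is private and the
 * COW-broken page is an exclusive anonymous one, as checked above.
 */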
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags, unsigned long address)
{
	if (!(flags & FOLL_DUMP))
		return NULL;

	/*
	 * When core dumping, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h = hstate_vma(vma);

		if (!hugetlbfs_pagecache_present(h, vma, address))
			return ERR_PTR(-EFAULT);
	} else if (vma_is_anonymous(vma) || !vma->vm_ops->fault) {
		return ERR_PTR(-EFAULT);
	}

	return NULL;
}

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
/* FOLL_FORCE can write to even unwritable PUDs in COW mappings. */
static inline bool can_follow_write_pud(pud_t pud, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pud is writable, we can write to the page. */
	if (pud_write(pud))
		return true;

	return can_follow_write_common(page, vma, flags);
}

static struct page *follow_huge_pud(struct vm_area_struct *vma,
				    unsigned long addr, pud_t *pudp,
				    int flags, struct follow_page_context *ctx)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pud_t pud = *pudp;
	unsigned long pfn = pud_pfn(pud);
	int ret;

	assert_spin_locked(pud_lockptr(mm, pudp));

	if (!pud_present(pud))
		return NULL;

	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pud(pud, pfn_to_page(pfn), vma, flags))
		return NULL;

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	page = pfn_to_page(pfn);

	if (!pud_write(pud) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		page = ERR_PTR(ret);
	else
		ctx->page_mask = HPAGE_PUD_NR - 1;

	return page;
}
/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	if (!can_follow_write_common(page, vma, flags))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (pmd_needs_soft_dirty_wp(vma, pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}

static struct page *follow_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmd,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmdval = *pmd;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	page = pmd_page(pmdval);
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pmd(pmdval, page, vma, flags))
		return NULL;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
		return ERR_PTR(-EFAULT);

	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
		return NULL;

	if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
			     !PageAnonExclusive(page), page);

	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		return ERR_PTR(ret);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	ctx->page_mask = HPAGE_PMD_NR - 1;

	return page;
}

#else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
static struct page *follow_huge_pud(struct vm_area_struct *vma,
				    unsigned long addr, pud_t *pudp,
				    int flags, struct follow_page_context *ctx)
{
	return NULL;
}

static struct page *follow_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmd,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	return NULL;
}
#endif	/* CONFIG_PGTABLE_HAS_HUGE_LEAVES */

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
			  pte_t *pte, unsigned int flags)
{
	if (flags & FOLL_TOUCH) {
		pte_t orig_entry = ptep_get(pte);
		pte_t entry = orig_entry;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(orig_entry, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}
/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pte is writable, we can write to the page. */
	if (pte_write(pte))
		return true;

	if (!can_follow_write_common(page, vma, flags))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (pte_needs_soft_dirty_wp(vma, pte))
		return false;
	return !userfaultfd_pte_wp(vma, pte);
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct folio *folio;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return no_page_table(vma, flags, address);
	pte = ptep_get(ptep);
	if (!pte_present(pte))
		goto no_page;
	if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
		goto no_page;

	page = vm_normal_page(vma, address, pte);

	/*
	 * We only care about anon pages in can_follow_write_pte().
	 */
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pte(pte, page, vma, flags)) {
		page = NULL;
		goto out;
	}

	if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	folio = page_folio(page);

	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
		page = ERR_PTR(-EMLINK);
		goto out;
	}

	VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
			     !PageAnonExclusive(page), page);

	/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
	ret = try_grab_folio(folio, 1, flags);
	if (unlikely(ret)) {
		page = ERR_PTR(ret);
		goto out;
	}

	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case). Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_folio_accessible(folio);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !folio_test_dirty(folio))
			folio_mark_dirty(folio);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * folio_mark_accessed().
		 */
		folio_mark_accessed(folio);
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags, address);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	pmdval = pmdp_get_lockless(pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags, address);
	if (!pmd_present(pmdval))
		return no_page_table(vma, flags, address);
	if (likely(!pmd_leaf(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
		return no_page_table(vma, flags, address);

	ptl = pmd_lock(mm, pmd);
	pmdval = *pmd;
	if (unlikely(!pmd_present(pmdval))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags, address);
	}
	if (unlikely(!pmd_leaf(pmdval))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, address);
		/* If pmd was left empty, stuff a page table in there quickly */
		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
	spin_unlock(ptl);
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pudp, pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pudp = pud_offset(p4dp, address);
	pud = READ_ONCE(*pudp);
	if (!pud_present(pud))
		return no_page_table(vma, flags, address);
	if (pud_leaf(pud)) {
		ptl = pud_lock(mm, pudp);
		page = follow_huge_pud(vma, address, pudp, flags, ctx);
		spin_unlock(ptl);
		if (page)
			return page;
		return no_page_table(vma, flags, address);
	}
	if (unlikely(pud_bad(pud)))
		return no_page_table(vma, flags, address);

	return follow_pmd_mask(vma, address, pudp, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4dp, p4d;

	p4dp = p4d_offset(pgdp, address);
	p4d = READ_ONCE(*p4dp);
	BUILD_BUG_ON(p4d_leaf(p4d));

	if (!p4d_present(p4d) || p4d_bad(p4d))
		return no_page_table(vma, flags, address);

	return follow_pud_mask(vma, address, p4dp, flags, ctx);
}
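/*
 * Together with follow_page_mask() below, the helpers above implement
 * the page-table descent (sketch):
 *
 *	follow_page_mask()
 *	  follow_p4d_mask()
 *	    follow_pud_mask()	-> follow_huge_pud() for PUD leaves
 *	      follow_pmd_mask()	-> follow_huge_pmd() for PMD leaves
 *	        follow_page_pte()
 *
 * The leaf handlers set ctx->page_mask, which lets __get_user_pages()
 * batch up all remaining pages of a huge mapping in one iteration.
 */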
/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * When getting an anonymous page and the caller has to trigger unsharing
 * of a shared anonymous page first, -EMLINK is returned. The caller should
 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
 * relevant with FOLL_PIN and !FOLL_WRITE.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	vma_pgtable_walk_begin(vma);

	ctx->page_mask = 0;
	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		page = no_page_table(vma, flags, address);
	else
		page = follow_p4d_mask(vma, address, pgd, flags, ctx);

	vma_pgtable_walk_end(vma);

	return page;
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	pte = pte_offset_map(pmd, address);
	if (!pte)
		return -EFAULT;
	entry = ptep_get(pte);
	if (pte_none(entry))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, entry);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
			goto unmap;
		*page = pte_page(entry);
	}
	ret = try_grab_folio(page_folio(*page), 1, gup_flags);
	if (unlikely(ret))
		goto unmap;
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}
/*
 * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not
 * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
 * to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags, bool unshare,
		int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	if (flags & FOLL_NOFAULT)
		return -EFAULT;
	if (flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (flags & FOLL_UNLOCKABLE) {
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
		/*
		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
		 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
		 * That's because some callers may not be prepared to
		 * handle early exits caused by non-fatal signals.
		 */
		if (flags & FOLL_INTERRUPTIBLE)
			fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
	}
	if (flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}
	if (unshare) {
		fault_flags |= FAULT_FLAG_UNSHARE;
		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_WRITE);
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
		 * mmap lock in the page fault handler. Sanity check this.
		 */
		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
		*locked = 0;

		/*
		 * We should do the same as VM_FAULT_RETRY, but let's not
		 * return -EBUSY since that's not reflecting the reality of
		 * what has happened - we've just fully completed a page
		 * fault, with the mmap lock released. Use -EAGAIN to show
		 * that we want to take the mmap lock _again_.
		 */
		return -EAGAIN;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, flags);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	return 0;
}
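/*
 * Summary of how faultin_page() return values drive the main loop in
 * __get_user_pages() below (an explanatory sketch of the code, not an
 * exhaustive contract):
 *
 *	0	-> fault handled, retry follow_page_mask()
 *	-EBUSY	-> VM_FAULT_RETRY: the mmap_lock was dropped
 *		   (*locked == 0) unless FOLL_NOWAIT was given
 *	-EAGAIN	-> the fault fully completed with the mmap_lock
 *		   released; the lock needs to be taken again
 *	-EFAULT/-ENOMEM/-EHWPOISON -> hard errors, passed to the caller
 */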
/*
 * Writing to file-backed mappings which require folio dirty tracking using GUP
 * is a fundamentally broken operation, as kernel write access to GUP mappings
 * does not adhere to the semantics expected by a file system.
 *
 * Consider the following scenario:
 *
 * 1. A folio is written to via GUP which write-faults the memory, notifying
 *    the file system and dirtying the folio.
 * 2. Later, writeback is triggered, resulting in the folio being cleaned and
 *    the PTE being marked read-only.
 * 3. The GUP caller writes to the folio, as it is mapped read/write via the
 *    direct mapping.
 * 4. The GUP caller, now done with the page, unpins it and sets it dirty
 *    (though it does not have to).
 *
 * This results in both data being written to a folio without writenotify, and
 * the folio being dirtied unexpectedly (if the caller decides to do so).
 */
static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
					  unsigned long gup_flags)
{
	/*
	 * If we aren't pinning then no problematic write can occur. A long
	 * term pin is the most egregious case so this is the one we disallow.
	 */
	if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
	    (FOLL_PIN | FOLL_LONGTERM))
		return true;

	/*
	 * If the VMA does not require dirty tracking then no problematic write
	 * can occur either.
	 */
	return !vma_needs_dirty_tracking(vma);
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);
	bool vma_anon = vma_is_anonymous(vma);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if ((gup_flags & FOLL_ANON) && !vma_anon)
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma))
		return -EOPNOTSUPP;

	if (vma_is_secretmem(vma))
		return -EFAULT;

	if (write) {
		if (!vma_anon &&
		    !writable_file_mapping_allowed(vma, gup_flags))
			return -EFAULT;

		if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/*
 * This is "vma_lookup()", but with a warning if we would have
 * historically expanded the stack in the GUP code.
 */
static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
					     unsigned long addr)
{
#ifdef CONFIG_STACK_GROWSUP
	return vma_lookup(mm, addr);
#else
	static volatile unsigned long next_warn;
	struct vm_area_struct *vma;
	unsigned long now, next;

	vma = find_vma(mm, addr);
	if (!vma || (addr >= vma->vm_start))
		return vma;

	/* Only warn for half-way relevant accesses */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	if (vma->vm_start - addr > 65536)
		return NULL;

	/* Let's not warn more than once an hour.. */
	now = jiffies; next = next_warn;
	if (next && time_before(now, next))
		return NULL;
	next_warn = now + 60*60*HZ;

	/* Let people know things may have changed. */
	pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
		current->comm, task_pid_nr(current),
		vma->vm_start, vma->vm_end, addr);
	dump_stack();
	return NULL;
#endif
}
/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * Must be called with mmap_lock held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
 * be released. If this happens *@locked will be set to 0 on return.
 *
 * A caller using such a combination of @gup_flags must therefore hold the
 * mmap_lock for reading only, and recognize when it's been released. Otherwise,
 * it must be held for either reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr_remote(mm, start);

	VM_WARN_ON_ONCE(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	VM_WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
			(FOLL_PIN | FOLL_GET));

	do {
		struct page *page;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			/*
			 * MADV_POPULATE_(READ|WRITE) wants to handle VMA
			 * lookups+error reporting differently.
			 */
			if (gup_flags & FOLL_MADV_POPULATE) {
				vma = vma_lookup(mm, start);
				if (!vma) {
					ret = -ENOMEM;
					goto out;
				}
				if (check_vma_flags(vma, gup_flags)) {
					ret = -EINVAL;
					goto out;
				}
				goto retry;
			}
			vma = gup_vma_lookup(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &page : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, gup_flags, &ctx);
		if (!page || PTR_ERR(page) == -EMLINK) {
			ret = faultin_page(vma, start, gup_flags,
					   PTR_ERR(page) == -EMLINK, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
			case -EAGAIN:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page. If the caller expects **pages to be
			 * filled in, bail out now, because that can't be done
			 * for this page.
			 */
			if (pages) {
				ret = PTR_ERR(page);
				goto out;
			}
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
next_page:
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;

		if (pages) {
			struct page *subpage;
			unsigned int j;

			/*
			 * This must be a large folio (and doesn't need to
			 * be the whole folio; it can be part of it), do
			 * the refcount work for all the subpages too.
			 *
			 * NOTE: here the page may not be the head page
			 * e.g. when start addr is not thp-size aligned.
			 * try_grab_folio() should have taken care of tail
			 * pages.
			 */
			if (page_increm > 1) {
				struct folio *folio = page_folio(page);

				/*
				 * Since we already hold refcount on the
				 * large folio, this should never fail.
				 */
				if (try_grab_folio(folio, page_increm - 1,
						   gup_flags)) {
					/*
					 * Release the 1st page ref if the
					 * folio is problematic, fail hard.
					 */
					gup_put_folio(folio, 1, gup_flags);
					ret = -EFAULT;
					goto out;
				}
			}

			for (j = 0; j < page_increm; j++) {
				subpage = nth_page(page, j);
				pages[i + j] = subpage;
				flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
				flush_dcache_page(subpage);
			}
		}

		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}
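/*
 * Worked example of the page_increm computation above (illustrative,
 * 4K base pages and a 2M PMD leaf, so ctx.page_mask == HPAGE_PMD_NR -
 * 1 == 511): if @start sits at page index 5 within the huge mapping,
 *
 *	page_increm = 1 + (~5 & 511) = 1 + 506 = 507,
 *
 * i.e. exactly the 507 pages remaining in the 512-page leaf, so the
 * loop consumes the rest of the huge page in a single iteration.
 */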
static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
 *		does not allow retry. If NULL, the caller must guarantee
 *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), the access returns -EFAULT, and we want to resolve the user fault
 * before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it does not
 * have the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
 */
int fixup_user_fault(struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	address = untagged_addr_remote(mm, address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = gup_vma_lookup(mm, address);
	if (!vma)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * NOTE: it's a pity that we need to retake the lock here
		 * to pair with the unlock() in the callers. Ideally we
		 * could tell the callers so they do not need to unlock.
		 */
		mmap_read_lock(mm);
		*unlocked = true;
		return 0;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		mmap_read_lock(mm);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
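/*
 * Sketch of the futex-style usage described above (illustrative only;
 * the real callers live elsewhere):
 *
 *	pagefault_disable();
 *	ret = __get_user(val, uaddr);
 *	pagefault_enable();
 *	if (ret) {
 *		mmap_read_lock(mm);
 *		ret = fixup_user_fault(mm, (unsigned long)uaddr,
 *				       FAULT_FLAG_WRITE, NULL);
 *		mmap_read_unlock(mm);
 *		if (!ret)
 *			goto retry;
 *	}
 */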
/*
 * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is
 * specified, it'll also respond to generic signals. The caller of GUP
 * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
 */
static bool gup_signal_pending(unsigned int flags)
{
	if (fatal_signal_pending(current))
		return true;

	if (!(flags & FOLL_INTERRUPTIBLE))
		return false;

	return signal_pending(current);
}

/*
 * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
 * the caller. This function may drop the mmap_lock. If it does so, then it will
 * set (*locked = 0).
 *
 * (*locked == 0) means that the caller expects this function to acquire and
 * drop the mmap_lock. Therefore, the value of *locked will still be zero when
 * the function returns, even though it may have changed temporarily during
 * function execution.
 *
 * Please note that this function, unlike __get_user_pages(), will not return 0
 * for nr_pages > 0, unless FOLL_NOWAIT is used.
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool must_unlock = false;

	if (!nr_pages)
		return 0;

	/*
	 * The internal caller expects GUP to manage the lock internally and the
	 * lock must be released when this returns.
	 */
	if (!*locked) {
		if (mmap_read_lock_killable(mm))
			return -EAGAIN;
		must_unlock = true;
		*locked = 1;
	} else
		mmap_assert_locked(mm);

	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       locked);
		if (!(flags & FOLL_UNLOCKABLE)) {
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			pages_done = ret;
			break;
		}

		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
		VM_WARN_ON_ONCE(!*locked && (ret < 0 || ret >= nr_pages));

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;

		/* The lock was temporarily dropped, so we must unlock later */
		must_unlock = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
		 * by fatal signals or even common signals, depending on
		 * the caller's request. So we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */
		if (gup_signal_pending(flags)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			VM_WARN_ON_ONCE(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			VM_WARN_ON_ONCE(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (must_unlock && *locked) {
		/*
		 * We either temporarily dropped the lock, or the caller
		 * requested that we both acquire and drop the lock. Either way,
		 * we must now unlock, and notify the caller of that state.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}

	/*
	 * Failing to pin anything implies something has gone wrong (except when
	 * FOLL_NOWAIT is specified).
	 */
	if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
		return -EFAULT;

	return pages_done;
}
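/*
 * Sketch of the *locked protocol from a caller's perspective
 * (illustrative; the exported get_user_pages*() wrappers hide this):
 *
 *	int locked = 1;
 *
 *	mmap_read_lock(mm);
 *	ret = __get_user_pages_locked(mm, start, nr_pages, pages, &locked,
 *				      gup_flags | FOLL_UNLOCKABLE);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *
 * Passing locked == 0 instead asks GUP to acquire and drop the lock
 * itself, which is what the internal callers in this file mostly do.
 */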
1879 */ 1880 ret = __get_user_pages(mm, start, nr_pages, gup_flags, 1881 NULL, locked ? locked : &local_locked); 1882 lru_add_drain(); 1883 return ret; 1884 } 1885 1886 /* 1887 * faultin_page_range() - populate (prefault) page tables inside the 1888 * given range readable/writable 1889 * 1890 * This takes care of mlocking the pages, too, if VM_LOCKED is set. 1891 * 1892 * @mm: the mm to populate page tables in 1893 * @start: start address 1894 * @end: end address 1895 * @write: whether to prefault readable or writable 1896 * @locked: whether the mmap_lock is still held 1897 * 1898 * Returns either number of processed pages in the MM, or a negative error 1899 * code on error (see __get_user_pages()). Note that this function reports 1900 * errors related to VMAs, such as incompatible mappings, as expected by 1901 * MADV_POPULATE_(READ|WRITE). 1902 * 1903 * The range must be page-aligned. 1904 * 1905 * mm->mmap_lock must be held. If it's released, *@locked will be set to 0. 1906 */ 1907 long faultin_page_range(struct mm_struct *mm, unsigned long start, 1908 unsigned long end, bool write, int *locked) 1909 { 1910 unsigned long nr_pages = (end - start) / PAGE_SIZE; 1911 int gup_flags; 1912 long ret; 1913 1914 VM_WARN_ON_ONCE(!PAGE_ALIGNED(start)); 1915 VM_WARN_ON_ONCE(!PAGE_ALIGNED(end)); 1916 mmap_assert_locked(mm); 1917 1918 /* 1919 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark 1920 * the page dirty with FOLL_WRITE -- which doesn't make a 1921 * difference with !FOLL_FORCE, because the page is writable 1922 * in the page table. 1923 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit 1924 * a poisoned page. 1925 * !FOLL_FORCE: Require proper access permissions. 1926 */ 1927 gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE | 1928 FOLL_MADV_POPULATE; 1929 if (write) 1930 gup_flags |= FOLL_WRITE; 1931 1932 ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked, 1933 gup_flags); 1934 lru_add_drain(); 1935 return ret; 1936 } 1937 1938 /* 1939 * __mm_populate - populate and/or mlock pages within a range of address space. 1940 * 1941 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap 1942 * flags. VMAs must be already marked with the desired vm_flags, and 1943 * mmap_lock must not be held. 1944 */ 1945 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) 1946 { 1947 struct mm_struct *mm = current->mm; 1948 unsigned long end, nstart, nend; 1949 struct vm_area_struct *vma = NULL; 1950 int locked = 0; 1951 long ret = 0; 1952 1953 end = start + len; 1954 1955 for (nstart = start; nstart < end; nstart = nend) { 1956 /* 1957 * We want to fault in pages for [nstart; end) address range. 1958 * Find first corresponding VMA. 1959 */ 1960 if (!locked) { 1961 locked = 1; 1962 mmap_read_lock(mm); 1963 vma = find_vma_intersection(mm, nstart, end); 1964 } else if (nstart >= vma->vm_end) 1965 vma = find_vma_intersection(mm, vma->vm_end, end); 1966 1967 if (!vma) 1968 break; 1969 /* 1970 * Set [nstart; nend) to intersection of desired address 1971 * range with the first VMA. Also, skip undesirable VMA types. 1972 */ 1973 nend = min(end, vma->vm_end); 1974 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 1975 continue; 1976 if (nstart < vma->vm_start) 1977 nstart = vma->vm_start; 1978 /* 1979 * Now fault in a range of pages. populate_vma_page_range() 1980 * double checks the vma flags, so that it won't mlock pages 1981 * if the vma was already munlocked. 
1982 */ 1983 ret = populate_vma_page_range(vma, nstart, nend, &locked); 1984 if (ret < 0) { 1985 if (ignore_errors) { 1986 ret = 0; 1987 continue; /* continue at next VMA */ 1988 } 1989 break; 1990 } 1991 nend = nstart + ret * PAGE_SIZE; 1992 ret = 0; 1993 } 1994 if (locked) 1995 mmap_read_unlock(mm); 1996 return ret; /* 0 or negative error code */ 1997 } 1998 #else /* CONFIG_MMU */ 1999 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, 2000 unsigned long nr_pages, struct page **pages, 2001 int *locked, unsigned int foll_flags) 2002 { 2003 struct vm_area_struct *vma; 2004 bool must_unlock = false; 2005 vm_flags_t vm_flags; 2006 long i; 2007 2008 if (!nr_pages) 2009 return 0; 2010 2011 /* 2012 * The internal caller expects GUP to manage the lock internally and the 2013 * lock must be released when this returns. 2014 */ 2015 if (!*locked) { 2016 if (mmap_read_lock_killable(mm)) 2017 return -EAGAIN; 2018 must_unlock = true; 2019 *locked = 1; 2020 } 2021 2022 /* calculate required read or write permissions. 2023 * If FOLL_FORCE is set, we only require the "MAY" flags. 2024 */ 2025 vm_flags = (foll_flags & FOLL_WRITE) ? 2026 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); 2027 vm_flags &= (foll_flags & FOLL_FORCE) ? 2028 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); 2029 2030 for (i = 0; i < nr_pages; i++) { 2031 vma = find_vma(mm, start); 2032 if (!vma) 2033 break; 2034 2035 /* protect what we can, including chardevs */ 2036 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || 2037 !(vm_flags & vma->vm_flags)) 2038 break; 2039 2040 if (pages) { 2041 pages[i] = virt_to_page((void *)start); 2042 if (pages[i]) 2043 get_page(pages[i]); 2044 } 2045 2046 start = (start + PAGE_SIZE) & PAGE_MASK; 2047 } 2048 2049 if (must_unlock && *locked) { 2050 mmap_read_unlock(mm); 2051 *locked = 0; 2052 } 2053 2054 return i ? : -EFAULT; 2055 } 2056 #endif /* !CONFIG_MMU */ 2057 2058 /** 2059 * fault_in_writeable - fault in userspace address range for writing 2060 * @uaddr: start of address range 2061 * @size: size of address range 2062 * 2063 * Returns the number of bytes not faulted in (like copy_to_user() and 2064 * copy_from_user()). 2065 */ 2066 size_t fault_in_writeable(char __user *uaddr, size_t size) 2067 { 2068 const unsigned long start = (unsigned long)uaddr; 2069 const unsigned long end = start + size; 2070 unsigned long cur; 2071 2072 if (unlikely(size == 0)) 2073 return 0; 2074 if (!user_write_access_begin(uaddr, size)) 2075 return size; 2076 2077 /* Stop once we overflow to 0. */ 2078 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) 2079 unsafe_put_user(0, (char __user *)cur, out); 2080 out: 2081 user_write_access_end(); 2082 if (size > cur - start) 2083 return size - (cur - start); 2084 return 0; 2085 } 2086 EXPORT_SYMBOL(fault_in_writeable); 2087 2088 /** 2089 * fault_in_subpage_writeable - fault in an address range for writing 2090 * @uaddr: start of address range 2091 * @size: size of address range 2092 * 2093 * Fault in a user address range for writing while checking for permissions at 2094 * sub-page granularity (e.g. arm64 MTE). This function should be used when 2095 * the caller cannot guarantee forward progress of a copy_to_user() loop. 2096 * 2097 * Returns the number of bytes not faulted in (like copy_to_user() and 2098 * copy_from_user()). 
2099 */ 2100 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) 2101 { 2102 size_t faulted_in; 2103 2104 /* 2105 * Attempt faulting in at page granularity first for page table 2106 * permission checking. The arch-specific probe_subpage_writeable() 2107 * functions may not check for this. 2108 */ 2109 faulted_in = size - fault_in_writeable(uaddr, size); 2110 if (faulted_in) 2111 faulted_in -= probe_subpage_writeable(uaddr, faulted_in); 2112 2113 return size - faulted_in; 2114 } 2115 EXPORT_SYMBOL(fault_in_subpage_writeable); 2116 2117 /* 2118 * fault_in_safe_writeable - fault in an address range for writing 2119 * @uaddr: start of address range 2120 * @size: length of address range 2121 * 2122 * Faults in an address range for writing. This is primarily useful when we 2123 * already know that some or all of the pages in the address range aren't in 2124 * memory. 2125 * 2126 * Unlike fault_in_writeable(), this function is non-destructive. 2127 * 2128 * Note that we don't pin or otherwise hold the pages referenced that we fault 2129 * in. There's no guarantee that they'll stay in memory for any duration of 2130 * time. 2131 * 2132 * Returns the number of bytes not faulted in, like copy_to_user() and 2133 * copy_from_user(). 2134 */ 2135 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) 2136 { 2137 const unsigned long start = (unsigned long)uaddr; 2138 const unsigned long end = start + size; 2139 unsigned long cur; 2140 struct mm_struct *mm = current->mm; 2141 bool unlocked = false; 2142 2143 if (unlikely(size == 0)) 2144 return 0; 2145 2146 mmap_read_lock(mm); 2147 /* Stop once we overflow to 0. */ 2148 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) 2149 if (fixup_user_fault(mm, cur, FAULT_FLAG_WRITE, &unlocked)) 2150 break; 2151 mmap_read_unlock(mm); 2152 2153 if (size > cur - start) 2154 return size - (cur - start); 2155 return 0; 2156 } 2157 EXPORT_SYMBOL(fault_in_safe_writeable); 2158 2159 /** 2160 * fault_in_readable - fault in userspace address range for reading 2161 * @uaddr: start of user address range 2162 * @size: size of user address range 2163 * 2164 * Returns the number of bytes not faulted in (like copy_to_user() and 2165 * copy_from_user()). 2166 */ 2167 size_t fault_in_readable(const char __user *uaddr, size_t size) 2168 { 2169 const unsigned long start = (unsigned long)uaddr; 2170 const unsigned long end = start + size; 2171 unsigned long cur; 2172 volatile char c; 2173 2174 if (unlikely(size == 0)) 2175 return 0; 2176 if (!user_read_access_begin(uaddr, size)) 2177 return size; 2178 2179 /* Stop once we overflow to 0. */ 2180 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) 2181 unsafe_get_user(c, (const char __user *)cur, out); 2182 out: 2183 user_read_access_end(); 2184 (void)c; 2185 if (size > cur - start) 2186 return size - (cur - start); 2187 return 0; 2188 } 2189 EXPORT_SYMBOL(fault_in_readable); 2190 2191 /** 2192 * get_dump_page() - pin user page in memory while writing it to core dump 2193 * @addr: user address 2194 * @locked: a pointer to an int denoting whether the mmap sem is held 2195 * 2196 * Returns struct page pointer of user page pinned for dump, 2197 * to be freed afterwards by put_page(). 
2198 * 2199 * Returns NULL on any kind of failure - a hole must then be inserted into 2200 * the corefile, to preserve alignment with its headers; and also returns 2201 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - 2202 * allowing a hole to be left in the corefile to save disk space. 2203 * 2204 * Called without mmap_lock (takes and releases the mmap_lock by itself). 2205 */ 2206 #ifdef CONFIG_ELF_CORE 2207 struct page *get_dump_page(unsigned long addr, int *locked) 2208 { 2209 struct page *page; 2210 int ret; 2211 2212 ret = __get_user_pages_locked(current->mm, addr, 1, &page, locked, 2213 FOLL_FORCE | FOLL_DUMP | FOLL_GET); 2214 return (ret == 1) ? page : NULL; 2215 } 2216 #endif /* CONFIG_ELF_CORE */ 2217 2218 #ifdef CONFIG_MIGRATION 2219 2220 /* 2221 * An array of either pages or folios ("pofs"). Although it may seem tempting to 2222 * avoid this complication, by simply interpreting a list of folios as a list of 2223 * pages, that approach won't work in the longer term, because eventually the 2224 * layouts of struct page and struct folio will become completely different. 2225 * Furthermore, this pof approach avoids excessive page_folio() calls. 2226 */ 2227 struct pages_or_folios { 2228 union { 2229 struct page **pages; 2230 struct folio **folios; 2231 void **entries; 2232 }; 2233 bool has_folios; 2234 long nr_entries; 2235 }; 2236 2237 static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i) 2238 { 2239 if (pofs->has_folios) 2240 return pofs->folios[i]; 2241 return page_folio(pofs->pages[i]); 2242 } 2243 2244 static void pofs_clear_entry(struct pages_or_folios *pofs, long i) 2245 { 2246 pofs->entries[i] = NULL; 2247 } 2248 2249 static void pofs_unpin(struct pages_or_folios *pofs) 2250 { 2251 if (pofs->has_folios) 2252 unpin_folios(pofs->folios, pofs->nr_entries); 2253 else 2254 unpin_user_pages(pofs->pages, pofs->nr_entries); 2255 } 2256 2257 static struct folio *pofs_next_folio(struct folio *folio, 2258 struct pages_or_folios *pofs, long *index_ptr) 2259 { 2260 long i = *index_ptr + 1; 2261 2262 if (!pofs->has_folios && folio_test_large(folio)) { 2263 const unsigned long start_pfn = folio_pfn(folio); 2264 const unsigned long end_pfn = start_pfn + folio_nr_pages(folio); 2265 2266 for (; i < pofs->nr_entries; i++) { 2267 unsigned long pfn = page_to_pfn(pofs->pages[i]); 2268 2269 /* Is this page part of this folio? */ 2270 if (pfn < start_pfn || pfn >= end_pfn) 2271 break; 2272 } 2273 } 2274 2275 if (unlikely(i == pofs->nr_entries)) 2276 return NULL; 2277 *index_ptr = i; 2278 2279 return pofs_get_folio(pofs, i); 2280 } 2281 2282 /* 2283 * Returns the number of collected folios. Return value is always >= 0. 
2284 */ 2285 static unsigned long collect_longterm_unpinnable_folios( 2286 struct list_head *movable_folio_list, 2287 struct pages_or_folios *pofs) 2288 { 2289 unsigned long collected = 0; 2290 bool drain_allow = true; 2291 struct folio *folio; 2292 long i = 0; 2293 2294 for (folio = pofs_get_folio(pofs, i); folio; 2295 folio = pofs_next_folio(folio, pofs, &i)) { 2296 2297 if (folio_is_longterm_pinnable(folio)) 2298 continue; 2299 2300 collected++; 2301 2302 if (folio_is_device_coherent(folio)) 2303 continue; 2304 2305 if (folio_test_hugetlb(folio)) { 2306 folio_isolate_hugetlb(folio, movable_folio_list); 2307 continue; 2308 } 2309 2310 if (!folio_test_lru(folio) && drain_allow) { 2311 lru_add_drain_all(); 2312 drain_allow = false; 2313 } 2314 2315 if (!folio_isolate_lru(folio)) 2316 continue; 2317 2318 list_add_tail(&folio->lru, movable_folio_list); 2319 node_stat_mod_folio(folio, 2320 NR_ISOLATED_ANON + folio_is_file_lru(folio), 2321 folio_nr_pages(folio)); 2322 } 2323 2324 return collected; 2325 } 2326 2327 /* 2328 * Unpins all folios and migrates device coherent folios and movable_folio_list. 2329 * Returns -EAGAIN if all folios were successfully migrated or -errno for 2330 * failure (or partial success). 2331 */ 2332 static int 2333 migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list, 2334 struct pages_or_folios *pofs) 2335 { 2336 int ret; 2337 unsigned long i; 2338 2339 for (i = 0; i < pofs->nr_entries; i++) { 2340 struct folio *folio = pofs_get_folio(pofs, i); 2341 2342 if (folio_is_device_coherent(folio)) { 2343 /* 2344 * Migration will fail if the folio is pinned, so 2345 * convert the pin on the source folio to a normal 2346 * reference. 2347 */ 2348 pofs_clear_entry(pofs, i); 2349 folio_get(folio); 2350 gup_put_folio(folio, 1, FOLL_PIN); 2351 2352 if (migrate_device_coherent_folio(folio)) { 2353 ret = -EBUSY; 2354 goto err; 2355 } 2356 2357 continue; 2358 } 2359 2360 /* 2361 * We can't migrate folios with unexpected references, so drop 2362 * the reference obtained by __get_user_pages_locked(). 2363 * Migrating folios have been added to movable_folio_list after 2364 * calling folio_isolate_lru() which takes a reference so the 2365 * folio won't be freed if it's migrating. 2366 */ 2367 unpin_folio(folio); 2368 pofs_clear_entry(pofs, i); 2369 } 2370 2371 if (!list_empty(movable_folio_list)) { 2372 struct migration_target_control mtc = { 2373 .nid = NUMA_NO_NODE, 2374 .gfp_mask = GFP_USER | __GFP_NOWARN, 2375 .reason = MR_LONGTERM_PIN, 2376 }; 2377 2378 if (migrate_pages(movable_folio_list, alloc_migration_target, 2379 NULL, (unsigned long)&mtc, MIGRATE_SYNC, 2380 MR_LONGTERM_PIN, NULL)) { 2381 ret = -ENOMEM; 2382 goto err; 2383 } 2384 } 2385 2386 putback_movable_pages(movable_folio_list); 2387 2388 return -EAGAIN; 2389 2390 err: 2391 pofs_unpin(pofs); 2392 putback_movable_pages(movable_folio_list); 2393 2394 return ret; 2395 } 2396 2397 static long 2398 check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs) 2399 { 2400 LIST_HEAD(movable_folio_list); 2401 unsigned long collected; 2402 2403 collected = collect_longterm_unpinnable_folios(&movable_folio_list, 2404 pofs); 2405 if (!collected) 2406 return 0; 2407 2408 return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs); 2409 } 2410 2411 /* 2412 * Check whether all folios are *allowed* to be pinned indefinitely (long term). 2413 * Rather confusingly, all folios in the range are required to be pinned via 2414 * FOLL_PIN, before calling this routine. 
2415 * 2416 * Return values: 2417 * 2418 * 0: if everything is OK and all folios in the range are allowed to be pinned, 2419 * then this routine leaves all folios pinned and returns zero for success. 2420 * 2421 * -EAGAIN: if any folios in the range are not allowed to be pinned, then this 2422 * routine will migrate those folios away, unpin all the folios in the range. If 2423 * migration of the entire set of folios succeeds, then -EAGAIN is returned. The 2424 * caller should re-pin the entire range with FOLL_PIN and then call this 2425 * routine again. 2426 * 2427 * -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this 2428 * indicates a migration failure. The caller should give up, and propagate the 2429 * error back up the call stack. The caller does not need to unpin any folios in 2430 * that case, because this routine will do the unpinning. 2431 */ 2432 static long check_and_migrate_movable_folios(unsigned long nr_folios, 2433 struct folio **folios) 2434 { 2435 struct pages_or_folios pofs = { 2436 .folios = folios, 2437 .has_folios = true, 2438 .nr_entries = nr_folios, 2439 }; 2440 2441 return check_and_migrate_movable_pages_or_folios(&pofs); 2442 } 2443 2444 /* 2445 * Return values and behavior are the same as those for 2446 * check_and_migrate_movable_folios(). 2447 */ 2448 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2449 struct page **pages) 2450 { 2451 struct pages_or_folios pofs = { 2452 .pages = pages, 2453 .has_folios = false, 2454 .nr_entries = nr_pages, 2455 }; 2456 2457 return check_and_migrate_movable_pages_or_folios(&pofs); 2458 } 2459 #else 2460 static long check_and_migrate_movable_pages(unsigned long nr_pages, 2461 struct page **pages) 2462 { 2463 return 0; 2464 } 2465 2466 static long check_and_migrate_movable_folios(unsigned long nr_folios, 2467 struct folio **folios) 2468 { 2469 return 0; 2470 } 2471 #endif /* CONFIG_MIGRATION */ 2472 2473 /* 2474 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which 2475 * allows us to process the FOLL_LONGTERM flag. 2476 */ 2477 static long __gup_longterm_locked(struct mm_struct *mm, 2478 unsigned long start, 2479 unsigned long nr_pages, 2480 struct page **pages, 2481 int *locked, 2482 unsigned int gup_flags) 2483 { 2484 unsigned int flags; 2485 long rc, nr_pinned_pages; 2486 2487 if (!(gup_flags & FOLL_LONGTERM)) 2488 return __get_user_pages_locked(mm, start, nr_pages, pages, 2489 locked, gup_flags); 2490 2491 flags = memalloc_pin_save(); 2492 do { 2493 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, 2494 pages, locked, 2495 gup_flags); 2496 if (nr_pinned_pages <= 0) { 2497 rc = nr_pinned_pages; 2498 break; 2499 } 2500 2501 /* FOLL_LONGTERM implies FOLL_PIN */ 2502 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); 2503 } while (rc == -EAGAIN); 2504 memalloc_pin_restore(flags); 2505 return rc ? rc : nr_pinned_pages; 2506 } 2507 2508 /* 2509 * Check that the given flags are valid for the exported gup/pup interface, and 2510 * update them with the required flags that the caller must have set. 
2511 */ 2512 static bool is_valid_gup_args(struct page **pages, int *locked, 2513 unsigned int *gup_flags_p, unsigned int to_set) 2514 { 2515 unsigned int gup_flags = *gup_flags_p; 2516 2517 /* 2518 * These flags not allowed to be specified externally to the gup 2519 * interfaces: 2520 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only 2521 * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote() 2522 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL 2523 */ 2524 if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS)) 2525 return false; 2526 2527 gup_flags |= to_set; 2528 if (locked) { 2529 /* At the external interface locked must be set */ 2530 if (WARN_ON_ONCE(*locked != 1)) 2531 return false; 2532 2533 gup_flags |= FOLL_UNLOCKABLE; 2534 } 2535 2536 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 2537 if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == 2538 (FOLL_PIN | FOLL_GET))) 2539 return false; 2540 2541 /* LONGTERM can only be specified when pinning */ 2542 if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM))) 2543 return false; 2544 2545 /* Pages input must be given if using GET/PIN */ 2546 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) 2547 return false; 2548 2549 /* We want to allow the pgmap to be hot-unplugged at all times */ 2550 if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) && 2551 (gup_flags & FOLL_PCI_P2PDMA))) 2552 return false; 2553 2554 *gup_flags_p = gup_flags; 2555 return true; 2556 } 2557 2558 #ifdef CONFIG_MMU 2559 /** 2560 * get_user_pages_remote() - pin user pages in memory 2561 * @mm: mm_struct of target mm 2562 * @start: starting user address 2563 * @nr_pages: number of pages from start to pin 2564 * @gup_flags: flags modifying lookup behaviour 2565 * @pages: array that receives pointers to the pages pinned. 2566 * Should be at least nr_pages long. Or NULL, if caller 2567 * only intends to ensure the pages are faulted in. 2568 * @locked: pointer to lock flag indicating whether lock is held and 2569 * subsequently whether VM_FAULT_RETRY functionality can be 2570 * utilised. Lock must initially be held. 2571 * 2572 * Returns either number of pages pinned (which may be less than the 2573 * number requested), or an error. Details about the return value: 2574 * 2575 * -- If nr_pages is 0, returns 0. 2576 * -- If nr_pages is >0, but no pages were pinned, returns -errno. 2577 * -- If nr_pages is >0, and some pages were pinned, returns the number of 2578 * pages pinned. Again, this may be less than nr_pages. 2579 * 2580 * The caller is responsible for releasing returned @pages, via put_page(). 2581 * 2582 * Must be called with mmap_lock held for read or write. 2583 * 2584 * get_user_pages_remote walks a process's page tables and takes a reference 2585 * to each struct page that each user address corresponds to at a given 2586 * instant. That is, it takes the page that would be accessed if a user 2587 * thread accesses the given user virtual address at that instant. 2588 * 2589 * This does not guarantee that the page exists in the user mappings when 2590 * get_user_pages_remote returns, and there may even be a completely different 2591 * page there in some cases (eg. if mmapped pagecache has been invalidated 2592 * and subsequently re-faulted). However it does guarantee that the page 2593 * won't be freed completely. And mostly callers simply care that the page 2594 * contains data that was valid *at some point in time*. 
Typically, an IO 2595 * or similar operation cannot guarantee anything stronger anyway because 2596 * locks can't be held over the syscall boundary. 2597 * 2598 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page 2599 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must 2600 * be called after the page is finished with, and before put_page is called. 2601 * 2602 * get_user_pages_remote is typically used for fewer-copy IO operations, 2603 * to get a handle on the memory by some means other than accesses 2604 * via the user virtual addresses. The pages may be submitted for 2605 * DMA to devices or accessed via their kernel linear mapping (via the 2606 * kmap APIs). Care should be taken to use the correct cache flushing APIs. 2607 * 2608 * See also get_user_pages_fast, for performance critical applications. 2609 * 2610 * get_user_pages_remote should be phased out in favor of 2611 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing 2612 * should use get_user_pages_remote because it cannot pass 2613 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. 2614 */ 2615 long get_user_pages_remote(struct mm_struct *mm, 2616 unsigned long start, unsigned long nr_pages, 2617 unsigned int gup_flags, struct page **pages, 2618 int *locked) 2619 { 2620 int local_locked = 1; 2621 2622 if (!is_valid_gup_args(pages, locked, &gup_flags, 2623 FOLL_TOUCH | FOLL_REMOTE)) 2624 return -EINVAL; 2625 2626 return __get_user_pages_locked(mm, start, nr_pages, pages, 2627 locked ? locked : &local_locked, 2628 gup_flags); 2629 } 2630 EXPORT_SYMBOL(get_user_pages_remote); 2631 2632 #else /* CONFIG_MMU */ 2633 long get_user_pages_remote(struct mm_struct *mm, 2634 unsigned long start, unsigned long nr_pages, 2635 unsigned int gup_flags, struct page **pages, 2636 int *locked) 2637 { 2638 return 0; 2639 } 2640 #endif /* !CONFIG_MMU */ 2641 2642 /** 2643 * get_user_pages() - pin user pages in memory 2644 * @start: starting user address 2645 * @nr_pages: number of pages from start to pin 2646 * @gup_flags: flags modifying lookup behaviour 2647 * @pages: array that receives pointers to the pages pinned. 2648 * Should be at least nr_pages long. Or NULL, if caller 2649 * only intends to ensure the pages are faulted in. 2650 * 2651 * This is the same as get_user_pages_remote(), just with a less-flexible 2652 * calling convention where we assume that the mm being operated on belongs to 2653 * the current task, and doesn't allow passing of a locked parameter. We also 2654 * obviously don't pass FOLL_REMOTE in here. 2655 */ 2656 long get_user_pages(unsigned long start, unsigned long nr_pages, 2657 unsigned int gup_flags, struct page **pages) 2658 { 2659 int locked = 1; 2660 2661 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) 2662 return -EINVAL; 2663 2664 return __get_user_pages_locked(current->mm, start, nr_pages, pages, 2665 &locked, gup_flags); 2666 } 2667 EXPORT_SYMBOL(get_user_pages); 2668 2669 /* 2670 * get_user_pages_unlocked() is suitable to replace the form: 2671 * 2672 * mmap_read_lock(mm); 2673 * get_user_pages(mm, ..., pages, NULL); 2674 * mmap_read_unlock(mm); 2675 * 2676 * with: 2677 * 2678 * get_user_pages_unlocked(mm, ..., pages); 2679 * 2680 * It is functionally equivalent to get_user_pages_fast so 2681 * get_user_pages_fast should be used instead if specific gup_flags 2682 * (e.g. FOLL_FORCE) are not required. 
2683 */ 2684 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 2685 struct page **pages, unsigned int gup_flags) 2686 { 2687 int locked = 0; 2688 2689 if (!is_valid_gup_args(pages, NULL, &gup_flags, 2690 FOLL_TOUCH | FOLL_UNLOCKABLE)) 2691 return -EINVAL; 2692 2693 return __get_user_pages_locked(current->mm, start, nr_pages, pages, 2694 &locked, gup_flags); 2695 } 2696 EXPORT_SYMBOL(get_user_pages_unlocked); 2697 2698 /* 2699 * GUP-fast 2700 * 2701 * get_user_pages_fast attempts to pin user pages by walking the page 2702 * tables directly and avoids taking locks. Thus the walker needs to be 2703 * protected from page table pages being freed from under it, and should 2704 * block any THP splits. 2705 * 2706 * One way to achieve this is to have the walker disable interrupts, and 2707 * rely on IPIs from the TLB flushing code blocking before the page table 2708 * pages are freed. This is unsuitable for architectures that do not need 2709 * to broadcast an IPI when invalidating TLBs. 2710 * 2711 * Another way to achieve this is to batch up page table containing pages 2712 * belonging to more than one mm_user, then rcu_sched a callback to free those 2713 * pages. Disabling interrupts will allow the gup_fast() walker to both block 2714 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs 2715 * (which is a relatively rare event). The code below adopts this strategy. 2716 * 2717 * Before activating this code, please be aware that the following assumptions 2718 * are currently made: 2719 * 2720 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to 2721 * free pages containing page tables or TLB flushing requires IPI broadcast. 2722 * 2723 * *) ptes can be read atomically by the architecture. 2724 * 2725 * *) valid user addesses are below TASK_MAX_SIZE 2726 * 2727 * The last two assumptions can be relaxed by the addition of helper functions. 2728 * 2729 * This code is based heavily on the PowerPC implementation by Nick Piggin. 2730 */ 2731 #ifdef CONFIG_HAVE_GUP_FAST 2732 /* 2733 * Used in the GUP-fast path to determine whether GUP is permitted to work on 2734 * a specific folio. 2735 * 2736 * This call assumes the caller has pinned the folio, that the lowest page table 2737 * level still points to this folio, and that interrupts have been disabled. 2738 * 2739 * GUP-fast must reject all secretmem folios. 2740 * 2741 * Writing to pinned file-backed dirty tracked folios is inherently problematic 2742 * (see comment describing the writable_file_mapping_allowed() function). We 2743 * therefore try to avoid the most egregious case of a long-term mapping doing 2744 * so. 2745 * 2746 * This function cannot be as thorough as that one as the VMA is not available 2747 * in the fast path, so instead we whitelist known good cases and if in doubt, 2748 * fall back to the slow path. 2749 */ 2750 static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags) 2751 { 2752 bool reject_file_backed = false; 2753 struct address_space *mapping; 2754 bool check_secretmem = false; 2755 unsigned long mapping_flags; 2756 2757 /* 2758 * If we aren't pinning then no problematic write can occur. A long term 2759 * pin is the most egregious case so this is the one we disallow. 2760 */ 2761 if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) == 2762 (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) 2763 reject_file_backed = true; 2764 2765 /* We hold a folio reference, so we can safely access folio fields. 
*/ 2766 2767 /* secretmem folios are always order-0 folios. */ 2768 if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio)) 2769 check_secretmem = true; 2770 2771 if (!reject_file_backed && !check_secretmem) 2772 return true; 2773 2774 if (WARN_ON_ONCE(folio_test_slab(folio))) 2775 return false; 2776 2777 /* hugetlb neither requires dirty-tracking nor can be secretmem. */ 2778 if (folio_test_hugetlb(folio)) 2779 return true; 2780 2781 /* 2782 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods 2783 * cannot proceed, which means no actions performed under RCU can 2784 * proceed either. 2785 * 2786 * inodes and thus their mappings are freed under RCU, which means the 2787 * mapping cannot be freed beneath us and thus we can safely dereference 2788 * it. 2789 */ 2790 lockdep_assert_irqs_disabled(); 2791 2792 /* 2793 * However, there may be operations which _alter_ the mapping, so ensure 2794 * we read it once and only once. 2795 */ 2796 mapping = READ_ONCE(folio->mapping); 2797 2798 /* 2799 * The mapping may have been truncated; in any case, we cannot determine 2800 * if this mapping is safe - fall back to the slow path to determine how 2801 * to proceed. 2802 */ 2803 if (!mapping) 2804 return false; 2805 2806 /* Anonymous folios pose no problem. */ 2807 mapping_flags = (unsigned long)mapping & FOLIO_MAPPING_FLAGS; 2808 if (mapping_flags) 2809 return mapping_flags & FOLIO_MAPPING_ANON; 2810 2811 /* 2812 * At this point, we know the mapping is non-null and points to an 2813 * address_space object. 2814 */ 2815 if (check_secretmem && secretmem_mapping(mapping)) 2816 return false; 2817 /* The only remaining allowed file system is shmem. */ 2818 return !reject_file_backed || shmem_mapping(mapping); 2819 } 2820 2821 static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start, 2822 unsigned int flags, struct page **pages) 2823 { 2824 while ((*nr) - nr_start) { 2825 struct folio *folio = page_folio(pages[--(*nr)]); 2826 2827 folio_clear_referenced(folio); 2828 gup_put_folio(folio, 1, flags); 2829 } 2830 } 2831 2832 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL 2833 /* 2834 * GUP-fast relies on pte change detection to avoid concurrent pgtable 2835 * operations. 2836 * 2837 * To pin the page, GUP-fast needs to do the following, in order: 2838 * (1) pin the page (by prefetching pte), then (2) check pte not changed. 2839 * 2840 * For the rest of pgtable operations where pgtable updates can be racy 2841 * with GUP-fast, we need to do (1) clear pte, then (2) check whether page 2842 * is pinned. 2843 * 2844 * Above will work for all pte-level operations, including THP split. 2845 * 2846 * For THP collapse, it's a bit more complicated because GUP-fast may be 2847 * walking a pgtable page that is being freed (pte is still valid but pmd 2848 * can be cleared already). To avoid a race in that situation, we also need 2849 * to check the pmd here to make sure the pmd doesn't change (corresponds to 2850 * pmdp_collapse_flush() in the THP collapse code path).
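 *
 * Illustrative interleaving for the pte-level case (a sketch): whichever
 * side finishes second sees the other's update and backs off:
 *
 *	GUP-fast			pgtable updater
 *	--------			---------------
 *	(1) pin the page
 *					(1) clear pte
 *	(2) re-read pte: it changed,	(2) check page pinned: it is,
 *	    so unpin and fall back	    so handle the raced pin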
2851 */ 2852 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, 2853 unsigned long end, unsigned int flags, struct page **pages, 2854 int *nr) 2855 { 2856 struct dev_pagemap *pgmap = NULL; 2857 int ret = 0; 2858 pte_t *ptep, *ptem; 2859 2860 ptem = ptep = pte_offset_map(&pmd, addr); 2861 if (!ptep) 2862 return 0; 2863 do { 2864 pte_t pte = ptep_get_lockless(ptep); 2865 struct page *page; 2866 struct folio *folio; 2867 2868 /* 2869 * Always fallback to ordinary GUP on PROT_NONE-mapped pages: 2870 * pte_access_permitted() better should reject these pages 2871 * either way: otherwise, GUP-fast might succeed in 2872 * cases where ordinary GUP would fail due to VMA access 2873 * permissions. 2874 */ 2875 if (pte_protnone(pte)) 2876 goto pte_unmap; 2877 2878 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 2879 goto pte_unmap; 2880 2881 if (pte_special(pte)) 2882 goto pte_unmap; 2883 2884 /* If it's not marked as special it must have a valid memmap. */ 2885 VM_WARN_ON_ONCE(!pfn_valid(pte_pfn(pte))); 2886 page = pte_page(pte); 2887 2888 folio = try_grab_folio_fast(page, 1, flags); 2889 if (!folio) 2890 goto pte_unmap; 2891 2892 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || 2893 unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { 2894 gup_put_folio(folio, 1, flags); 2895 goto pte_unmap; 2896 } 2897 2898 if (!gup_fast_folio_allowed(folio, flags)) { 2899 gup_put_folio(folio, 1, flags); 2900 goto pte_unmap; 2901 } 2902 2903 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { 2904 gup_put_folio(folio, 1, flags); 2905 goto pte_unmap; 2906 } 2907 2908 /* 2909 * We need to make the page accessible if and only if we are 2910 * going to access its content (the FOLL_PIN case). Please 2911 * see Documentation/core-api/pin_user_pages.rst for 2912 * details. 2913 */ 2914 if (flags & FOLL_PIN) { 2915 ret = arch_make_folio_accessible(folio); 2916 if (ret) { 2917 gup_put_folio(folio, 1, flags); 2918 goto pte_unmap; 2919 } 2920 } 2921 folio_set_referenced(folio); 2922 pages[*nr] = page; 2923 (*nr)++; 2924 } while (ptep++, addr += PAGE_SIZE, addr != end); 2925 2926 ret = 1; 2927 2928 pte_unmap: 2929 if (pgmap) 2930 put_dev_pagemap(pgmap); 2931 pte_unmap(ptem); 2932 return ret; 2933 } 2934 #else 2935 2936 /* 2937 * If we can't determine whether or not a pte is special, then fail immediately 2938 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not 2939 * to be special. 2940 * 2941 * For a futex to be placed on a THP tail page, get_futex_key requires a 2942 * get_user_pages_fast_only implementation that can pin pages. Thus it's still 2943 * useful to have gup_fast_pmd_leaf even if we can't operate on ptes. 
2944 */ 2945 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, 2946 unsigned long end, unsigned int flags, struct page **pages, 2947 int *nr) 2948 { 2949 return 0; 2950 } 2951 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ 2952 2953 static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2954 unsigned long end, unsigned int flags, struct page **pages, 2955 int *nr) 2956 { 2957 struct page *page; 2958 struct folio *folio; 2959 int refs; 2960 2961 if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) 2962 return 0; 2963 2964 if (pmd_special(orig)) 2965 return 0; 2966 2967 page = pmd_page(orig); 2968 refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); 2969 2970 folio = try_grab_folio_fast(page, refs, flags); 2971 if (!folio) 2972 return 0; 2973 2974 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 2975 gup_put_folio(folio, refs, flags); 2976 return 0; 2977 } 2978 2979 if (!gup_fast_folio_allowed(folio, flags)) { 2980 gup_put_folio(folio, refs, flags); 2981 return 0; 2982 } 2983 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 2984 gup_put_folio(folio, refs, flags); 2985 return 0; 2986 } 2987 2988 *nr += refs; 2989 folio_set_referenced(folio); 2990 return 1; 2991 } 2992 2993 static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, 2994 unsigned long end, unsigned int flags, struct page **pages, 2995 int *nr) 2996 { 2997 struct page *page; 2998 struct folio *folio; 2999 int refs; 3000 3001 if (!pud_access_permitted(orig, flags & FOLL_WRITE)) 3002 return 0; 3003 3004 if (pud_special(orig)) 3005 return 0; 3006 3007 page = pud_page(orig); 3008 refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); 3009 3010 folio = try_grab_folio_fast(page, refs, flags); 3011 if (!folio) 3012 return 0; 3013 3014 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 3015 gup_put_folio(folio, refs, flags); 3016 return 0; 3017 } 3018 3019 if (!gup_fast_folio_allowed(folio, flags)) { 3020 gup_put_folio(folio, refs, flags); 3021 return 0; 3022 } 3023 3024 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { 3025 gup_put_folio(folio, refs, flags); 3026 return 0; 3027 } 3028 3029 *nr += refs; 3030 folio_set_referenced(folio); 3031 return 1; 3032 } 3033 3034 static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, 3035 unsigned long end, unsigned int flags, struct page **pages, 3036 int *nr) 3037 { 3038 unsigned long next; 3039 pmd_t *pmdp; 3040 3041 pmdp = pmd_offset_lockless(pudp, pud, addr); 3042 do { 3043 pmd_t pmd = pmdp_get_lockless(pmdp); 3044 3045 next = pmd_addr_end(addr, end); 3046 if (!pmd_present(pmd)) 3047 return 0; 3048 3049 if (unlikely(pmd_leaf(pmd))) { 3050 /* See gup_fast_pte_range() */ 3051 if (pmd_protnone(pmd)) 3052 return 0; 3053 3054 if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags, 3055 pages, nr)) 3056 return 0; 3057 3058 } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags, 3059 pages, nr)) 3060 return 0; 3061 } while (pmdp++, addr = next, addr != end); 3062 3063 return 1; 3064 } 3065 3066 static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, 3067 unsigned long end, unsigned int flags, struct page **pages, 3068 int *nr) 3069 { 3070 unsigned long next; 3071 pud_t *pudp; 3072 3073 pudp = pud_offset_lockless(p4dp, p4d, addr); 3074 do { 3075 pud_t pud = READ_ONCE(*pudp); 3076 3077 next = pud_addr_end(addr, end); 3078 if (unlikely(!pud_present(pud))) 3079 return 0; 3080 if (unlikely(pud_leaf(pud))) { 3081 if (!gup_fast_pud_leaf(pud, pudp, addr, 
next, flags, 3082 pages, nr)) 3083 return 0; 3084 } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags, 3085 pages, nr)) 3086 return 0; 3087 } while (pudp++, addr = next, addr != end); 3088 3089 return 1; 3090 } 3091 3092 static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, 3093 unsigned long end, unsigned int flags, struct page **pages, 3094 int *nr) 3095 { 3096 unsigned long next; 3097 p4d_t *p4dp; 3098 3099 p4dp = p4d_offset_lockless(pgdp, pgd, addr); 3100 do { 3101 p4d_t p4d = READ_ONCE(*p4dp); 3102 3103 next = p4d_addr_end(addr, end); 3104 if (!p4d_present(p4d)) 3105 return 0; 3106 BUILD_BUG_ON(p4d_leaf(p4d)); 3107 if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags, 3108 pages, nr)) 3109 return 0; 3110 } while (p4dp++, addr = next, addr != end); 3111 3112 return 1; 3113 } 3114 3115 static void gup_fast_pgd_range(unsigned long addr, unsigned long end, 3116 unsigned int flags, struct page **pages, int *nr) 3117 { 3118 unsigned long next; 3119 pgd_t *pgdp; 3120 3121 pgdp = pgd_offset(current->mm, addr); 3122 do { 3123 pgd_t pgd = READ_ONCE(*pgdp); 3124 3125 next = pgd_addr_end(addr, end); 3126 if (pgd_none(pgd)) 3127 return; 3128 BUILD_BUG_ON(pgd_leaf(pgd)); 3129 if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags, 3130 pages, nr)) 3131 return; 3132 } while (pgdp++, addr = next, addr != end); 3133 } 3134 #else 3135 static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end, 3136 unsigned int flags, struct page **pages, int *nr) 3137 { 3138 } 3139 #endif /* CONFIG_HAVE_GUP_FAST */ 3140 3141 #ifndef gup_fast_permitted 3142 /* 3143 * Check if it's allowed to use get_user_pages_fast_only() for the range, or 3144 * we need to fall back to the slow version: 3145 */ 3146 static bool gup_fast_permitted(unsigned long start, unsigned long end) 3147 { 3148 return true; 3149 } 3150 #endif 3151 3152 static unsigned long gup_fast(unsigned long start, unsigned long end, 3153 unsigned int gup_flags, struct page **pages) 3154 { 3155 unsigned long flags; 3156 int nr_pinned = 0; 3157 unsigned seq; 3158 3159 if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) || 3160 !gup_fast_permitted(start, end)) 3161 return 0; 3162 3163 if (gup_flags & FOLL_PIN) { 3164 if (!raw_seqcount_try_begin(¤t->mm->write_protect_seq, seq)) 3165 return 0; 3166 } 3167 3168 /* 3169 * Disable interrupts. The nested form is used, in order to allow full, 3170 * general purpose use of this routine. 3171 * 3172 * With interrupts disabled, we block page table pages from being freed 3173 * from under us. See struct mmu_table_batch comments in 3174 * include/asm-generic/tlb.h for more details. 3175 * 3176 * We do not adopt an rcu_read_lock() here as we also want to block IPIs 3177 * that come from callers of tlb_remove_table_sync_one(). 3178 */ 3179 local_irq_save(flags); 3180 gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned); 3181 local_irq_restore(flags); 3182 3183 /* 3184 * When pinning pages for DMA there could be a concurrent write protect 3185 * from fork() via copy_page_range(), in this case always fail GUP-fast. 
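 * The read_seqcount_retry() below, paired with the
 * raw_seqcount_try_begin() above, detects that race: on retry we unpin
 * everything and return 0, and the caller falls back to the slow path.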
3186 */ 3187 if (gup_flags & FOLL_PIN) { 3188 if (read_seqcount_retry(¤t->mm->write_protect_seq, seq)) { 3189 gup_fast_unpin_user_pages(pages, nr_pinned); 3190 return 0; 3191 } else { 3192 sanity_check_pinned_pages(pages, nr_pinned); 3193 } 3194 } 3195 return nr_pinned; 3196 } 3197 3198 static int gup_fast_fallback(unsigned long start, unsigned long nr_pages, 3199 unsigned int gup_flags, struct page **pages) 3200 { 3201 unsigned long len, end; 3202 unsigned long nr_pinned; 3203 int locked = 0; 3204 int ret; 3205 3206 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | 3207 FOLL_FORCE | FOLL_PIN | FOLL_GET | 3208 FOLL_FAST_ONLY | FOLL_NOFAULT | 3209 FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT))) 3210 return -EINVAL; 3211 3212 if (gup_flags & FOLL_PIN) 3213 mm_set_has_pinned_flag(¤t->mm->flags); 3214 3215 if (!(gup_flags & FOLL_FAST_ONLY)) 3216 might_lock_read(¤t->mm->mmap_lock); 3217 3218 start = untagged_addr(start) & PAGE_MASK; 3219 len = nr_pages << PAGE_SHIFT; 3220 if (check_add_overflow(start, len, &end)) 3221 return -EOVERFLOW; 3222 if (end > TASK_SIZE_MAX) 3223 return -EFAULT; 3224 3225 nr_pinned = gup_fast(start, end, gup_flags, pages); 3226 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) 3227 return nr_pinned; 3228 3229 /* Slow path: try to get the remaining pages with get_user_pages */ 3230 start += nr_pinned << PAGE_SHIFT; 3231 pages += nr_pinned; 3232 ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, 3233 pages, &locked, 3234 gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); 3235 if (ret < 0) { 3236 /* 3237 * The caller has to unpin the pages we already pinned so 3238 * returning -errno is not an option 3239 */ 3240 if (nr_pinned) 3241 return nr_pinned; 3242 return ret; 3243 } 3244 return ret + nr_pinned; 3245 } 3246 3247 /** 3248 * get_user_pages_fast_only() - pin user pages in memory 3249 * @start: starting user address 3250 * @nr_pages: number of pages from start to pin 3251 * @gup_flags: flags modifying pin behaviour 3252 * @pages: array that receives pointers to the pages pinned. 3253 * Should be at least nr_pages long. 3254 * 3255 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to 3256 * the regular GUP. 3257 * 3258 * If the architecture does not support this function, simply return with no 3259 * pages pinned. 3260 * 3261 * Careful, careful! COW breaking can go either way, so a non-write 3262 * access can get ambiguous page results. If you call this function without 3263 * 'write' set, you'd better be sure that you're ok with that ambiguity. 3264 */ 3265 int get_user_pages_fast_only(unsigned long start, int nr_pages, 3266 unsigned int gup_flags, struct page **pages) 3267 { 3268 /* 3269 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, 3270 * because gup fast is always a "pin with a +1 page refcount" request. 3271 * 3272 * FOLL_FAST_ONLY is required in order to match the API description of 3273 * this routine: no fall back to regular ("slow") GUP. 3274 */ 3275 if (!is_valid_gup_args(pages, NULL, &gup_flags, 3276 FOLL_GET | FOLL_FAST_ONLY)) 3277 return -EINVAL; 3278 3279 return gup_fast_fallback(start, nr_pages, gup_flags, pages); 3280 } 3281 EXPORT_SYMBOL_GPL(get_user_pages_fast_only); 3282 3283 /** 3284 * get_user_pages_fast() - pin user pages in memory 3285 * @start: starting user address 3286 * @nr_pages: number of pages from start to pin 3287 * @gup_flags: flags modifying pin behaviour 3288 * @pages: array that receives pointers to the pages pinned. 3289 * Should be at least nr_pages long. 
3290 * 3291 * Attempt to pin user pages in memory without taking mm->mmap_lock. 3292 * If not successful, it will fall back to taking the lock and 3293 * calling get_user_pages(). 3294 * 3295 * Returns number of pages pinned. This may be fewer than the number requested. 3296 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns 3297 * -errno. 3298 */ 3299 int get_user_pages_fast(unsigned long start, int nr_pages, 3300 unsigned int gup_flags, struct page **pages) 3301 { 3302 /* 3303 * The caller may or may not have explicitly set FOLL_GET; either way is 3304 * OK. However, internally (within mm/gup.c), gup fast variants must set 3305 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" 3306 * request. 3307 */ 3308 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) 3309 return -EINVAL; 3310 return gup_fast_fallback(start, nr_pages, gup_flags, pages); 3311 } 3312 EXPORT_SYMBOL_GPL(get_user_pages_fast); 3313 3314 /** 3315 * pin_user_pages_fast() - pin user pages in memory without taking locks 3316 * 3317 * @start: starting user address 3318 * @nr_pages: number of pages from start to pin 3319 * @gup_flags: flags modifying pin behaviour 3320 * @pages: array that receives pointers to the pages pinned. 3321 * Should be at least nr_pages long. 3322 * 3323 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See 3324 * get_user_pages_fast() for documentation on the function arguments, because 3325 * the arguments here are identical. 3326 * 3327 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3328 * see Documentation/core-api/pin_user_pages.rst for further details. 3329 * 3330 * Note that if a zero_page is amongst the returned pages, it will not have 3331 * pins in it and unpin_user_page() will not remove pins from it. 3332 */ 3333 int pin_user_pages_fast(unsigned long start, int nr_pages, 3334 unsigned int gup_flags, struct page **pages) 3335 { 3336 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) 3337 return -EINVAL; 3338 return gup_fast_fallback(start, nr_pages, gup_flags, pages); 3339 } 3340 EXPORT_SYMBOL_GPL(pin_user_pages_fast); 3341 3342 /** 3343 * pin_user_pages_remote() - pin pages of a remote process 3344 * 3345 * @mm: mm_struct of target mm 3346 * @start: starting user address 3347 * @nr_pages: number of pages from start to pin 3348 * @gup_flags: flags modifying lookup behaviour 3349 * @pages: array that receives pointers to the pages pinned. 3350 * Should be at least nr_pages long. 3351 * @locked: pointer to lock flag indicating whether lock is held and 3352 * subsequently whether VM_FAULT_RETRY functionality can be 3353 * utilised. Lock must initially be held. 3354 * 3355 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See 3356 * get_user_pages_remote() for documentation on the function arguments, because 3357 * the arguments here are identical. 3358 * 3359 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3360 * see Documentation/core-api/pin_user_pages.rst for details. 3361 * 3362 * Note that if a zero_page is amongst the returned pages, it will not have 3363 * pins in it and unpin_user_page*() will not remove pins from it. 
3364 */ 3365 long pin_user_pages_remote(struct mm_struct *mm, 3366 unsigned long start, unsigned long nr_pages, 3367 unsigned int gup_flags, struct page **pages, 3368 int *locked) 3369 { 3370 int local_locked = 1; 3371 3372 if (!is_valid_gup_args(pages, locked, &gup_flags, 3373 FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) 3374 return 0; 3375 return __gup_longterm_locked(mm, start, nr_pages, pages, 3376 locked ? locked : &local_locked, 3377 gup_flags); 3378 } 3379 EXPORT_SYMBOL(pin_user_pages_remote); 3380 3381 /** 3382 * pin_user_pages() - pin user pages in memory for use by other devices 3383 * 3384 * @start: starting user address 3385 * @nr_pages: number of pages from start to pin 3386 * @gup_flags: flags modifying lookup behaviour 3387 * @pages: array that receives pointers to the pages pinned. 3388 * Should be at least nr_pages long. 3389 * 3390 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and 3391 * FOLL_PIN is set. 3392 * 3393 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3394 * see Documentation/core-api/pin_user_pages.rst for details. 3395 * 3396 * Note that if a zero_page is amongst the returned pages, it will not have 3397 * pins in it and unpin_user_page*() will not remove pins from it. 3398 */ 3399 long pin_user_pages(unsigned long start, unsigned long nr_pages, 3400 unsigned int gup_flags, struct page **pages) 3401 { 3402 int locked = 1; 3403 3404 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) 3405 return 0; 3406 return __gup_longterm_locked(current->mm, start, nr_pages, 3407 pages, &locked, gup_flags); 3408 } 3409 EXPORT_SYMBOL(pin_user_pages); 3410 3411 /* 3412 * pin_user_pages_unlocked() is the FOLL_PIN variant of 3413 * get_user_pages_unlocked(). Behavior is the same, except that this one sets 3414 * FOLL_PIN and rejects FOLL_GET. 3415 * 3416 * Note that if a zero_page is amongst the returned pages, it will not have 3417 * pins in it and unpin_user_page*() will not remove pins from it. 3418 */ 3419 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 3420 struct page **pages, unsigned int gup_flags) 3421 { 3422 int locked = 0; 3423 3424 if (!is_valid_gup_args(pages, NULL, &gup_flags, 3425 FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) 3426 return 0; 3427 3428 return __gup_longterm_locked(current->mm, start, nr_pages, pages, 3429 &locked, gup_flags); 3430 } 3431 EXPORT_SYMBOL(pin_user_pages_unlocked); 3432 3433 /** 3434 * memfd_pin_folios() - pin folios associated with a memfd 3435 * @memfd: the memfd whose folios are to be pinned 3436 * @start: the first memfd offset 3437 * @end: the last memfd offset (inclusive) 3438 * @folios: array that receives pointers to the folios pinned 3439 * @max_folios: maximum number of entries in @folios 3440 * @offset: the offset into the first folio 3441 * 3442 * Attempt to pin folios associated with a memfd in the contiguous range 3443 * [start, end]. Given that a memfd is either backed by shmem or hugetlb, 3444 * the folios can either be found in the page cache or need to be allocated 3445 * if necessary. Once the folios are located, they are all pinned via 3446 * FOLL_PIN and @offset is populatedwith the offset into the first folio. 3447 * And, eventually, these pinned folios must be released either using 3448 * unpin_folios() or unpin_folio(). 3449 * 3450 * It must be noted that the folios may be pinned for an indefinite amount 3451 * of time. And, in most cases, the duration of time they may stay pinned 3452 * would be controlled by the userspace. 
This behavior is effectively the 3453 * same as using FOLL_LONGTERM with other GUP APIs. 3454 * 3455 * Returns number of folios pinned, which could be less than @max_folios 3456 * as it depends on the folio sizes that cover the range [start, end]. 3457 * If no folios were pinned, it returns -errno. 3458 */ 3459 long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end, 3460 struct folio **folios, unsigned int max_folios, 3461 pgoff_t *offset) 3462 { 3463 unsigned int flags, nr_folios, nr_found; 3464 unsigned int i, pgshift = PAGE_SHIFT; 3465 pgoff_t start_idx, end_idx; 3466 struct folio *folio = NULL; 3467 struct folio_batch fbatch; 3468 struct hstate *h; 3469 long ret = -EINVAL; 3470 3471 if (start < 0 || start > end || !max_folios) 3472 return -EINVAL; 3473 3474 if (!memfd) 3475 return -EINVAL; 3476 3477 if (!shmem_file(memfd) && !is_file_hugepages(memfd)) 3478 return -EINVAL; 3479 3480 if (end >= i_size_read(file_inode(memfd))) 3481 return -EINVAL; 3482 3483 if (is_file_hugepages(memfd)) { 3484 h = hstate_file(memfd); 3485 pgshift = huge_page_shift(h); 3486 } 3487 3488 flags = memalloc_pin_save(); 3489 do { 3490 nr_folios = 0; 3491 start_idx = start >> pgshift; 3492 end_idx = end >> pgshift; 3493 if (is_file_hugepages(memfd)) { 3494 start_idx <<= huge_page_order(h); 3495 end_idx <<= huge_page_order(h); 3496 } 3497 3498 folio_batch_init(&fbatch); 3499 while (start_idx <= end_idx && nr_folios < max_folios) { 3500 /* 3501 * In most cases, we should be able to find the folios 3502 * in the page cache. If we cannot find them for some 3503 * reason, we try to allocate them and add them to the 3504 * page cache. 3505 */ 3506 nr_found = filemap_get_folios_contig(memfd->f_mapping, 3507 &start_idx, 3508 end_idx, 3509 &fbatch); 3510 if (folio) { 3511 folio_put(folio); 3512 folio = NULL; 3513 } 3514 3515 for (i = 0; i < nr_found; i++) { 3516 folio = fbatch.folios[i]; 3517 3518 if (try_grab_folio(folio, 1, FOLL_PIN)) { 3519 folio_batch_release(&fbatch); 3520 ret = -EINVAL; 3521 goto err; 3522 } 3523 3524 if (nr_folios == 0) 3525 *offset = offset_in_folio(folio, start); 3526 3527 folios[nr_folios] = folio; 3528 if (++nr_folios == max_folios) 3529 break; 3530 } 3531 3532 folio = NULL; 3533 folio_batch_release(&fbatch); 3534 if (!nr_found) { 3535 folio = memfd_alloc_folio(memfd, start_idx); 3536 if (IS_ERR(folio)) { 3537 ret = PTR_ERR(folio); 3538 if (ret != -EEXIST) 3539 goto err; 3540 folio = NULL; 3541 } 3542 } 3543 } 3544 3545 ret = check_and_migrate_movable_folios(nr_folios, folios); 3546 } while (ret == -EAGAIN); 3547 3548 memalloc_pin_restore(flags); 3549 return ret ? ret : nr_folios; 3550 err: 3551 memalloc_pin_restore(flags); 3552 unpin_folios(folios, nr_folios); 3553 3554 return ret; 3555 } 3556 EXPORT_SYMBOL_GPL(memfd_pin_folios); 3557 3558 /** 3559 * folio_add_pins() - add pins to an already-pinned folio 3560 * @folio: the folio to add more pins to 3561 * @pins: number of pins to add 3562 * 3563 * Try to add more pins to an already-pinned folio. The semantics 3564 * of the pin (e.g., FOLL_WRITE) follow any existing pin and cannot 3565 * be changed. 3566 * 3567 * This function is helpful when having obtained a pin on a large folio 3568 * using memfd_pin_folios(), but wanting to logically unpin parts 3569 * (e.g., individual pages) of the folio later, for example, using 3570 * unpin_user_page_range_dirty_lock(). 3571 * 3572 * This is not the right interface to initially pin a folio. 
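 *
 * Illustrative use (a sketch; the names are hypothetical): after pinning
 * a large folio once via memfd_pin_folios(), take one extra pin per page
 * so that each page can later be unpinned independently:
 *
 *	if (folio_add_pins(folio, folio_nr_pages(folio) - 1))
 *		goto err;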
3573 */ 3574 int folio_add_pins(struct folio *folio, unsigned int pins) 3575 { 3576 VM_WARN_ON_ONCE(!folio_maybe_dma_pinned(folio)); 3577 3578 return try_grab_folio(folio, pins, FOLL_PIN); 3579 } 3580 EXPORT_SYMBOL_GPL(folio_add_pins); 3581