// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static void hpage_pincount_add(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_sub(refs, compound_pincount_ptr(page));
}

/* Equivalent to calling put_page() @refs times. */
static void put_page_refs(struct page *page, int refs)
{
#ifdef CONFIG_DEBUG_VM
	if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
		return;
#endif

	/*
	 * Calling put_page() for each ref is unnecessarily slow. Only the last
	 * ref needs a put_page().
	 */
	if (refs > 1)
		page_ref_sub(page, refs - 1);
	put_page(page);
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);

	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the head page; but it
	 * could be that between the compound_head() lookup and the refcount
	 * increment, the compound page was split, in which case we'd end up
	 * holding a reference on a page that has nothing to do with the page
	 * we were given anymore.
	 * So now that the head page is stable, recheck that the pages still
	 * belong together.
	 */
	if (unlikely(compound_head(page) != head)) {
		put_page_refs(head, refs);
		return NULL;
	}

	return head;
}

/**
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * Even though the name includes "compound_head", this function is still
 * appropriate for callers that have a non-compound @page to get.
 *
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the page's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: page's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on compound pages that are > two pages long: page's refcount
 *    will be incremented by @refs, and page[2].hpage_pinned_refcount will be
 *    incremented by @refs * GUP_PIN_COUNTING_BIAS.
 *
 *    FOLL_PIN on normal pages, or compound pages that are two pages long:
 *    page's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
struct page *try_grab_compound_head(struct page *page,
				    int refs, unsigned int flags)
{
	if (flags & FOLL_GET)
		return try_get_compound_head(page, refs);
	else if (flags & FOLL_PIN) {
		/*
		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
		 * right zone, so fail and let the caller fall back to the slow
		 * path.
		 */
		if (unlikely((flags & FOLL_LONGTERM) &&
			     !is_pinnable_page(page)))
			return NULL;

		/*
		 * CAUTION: Don't use compound_head() on the page before this
		 * point, the result won't be stable.
		 */
		page = try_get_compound_head(page, refs);
		if (!page)
			return NULL;

		/*
		 * When pinning a compound page of order > 1 (which is what
		 * hpage_pincount_available() checks for), use an exact count to
		 * track it, via hpage_pincount_add/_sub().
		 *
		 * However, be sure to *also* increment the normal page refcount
		 * field at least once, so that the page really is pinned.
		 * That's why the refcount from the earlier
		 * try_get_compound_head() is left intact.
		 */
		if (hpage_pincount_available(page))
			hpage_pincount_add(page, refs);
		else
			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
				    refs);

		return page;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static void put_compound_head(struct page *page, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
				    refs);

		if (hpage_pincount_available(page))
			hpage_pincount_sub(page, refs);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	put_page_refs(page, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
 *
 * @page:  pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases: please see the try_grab_compound_head() documentation, with
 * "refs=1".
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
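 *
 * Illustrative sketch of a typical caller (not taken from this file; the
 * page-table walk that produced @page and "foll_flags" are assumed caller
 * context):
 *
 *	page = vm_normal_page(vma, address, pte);
 *	if (page && unlikely(!try_grab_page(page, foll_flags)))
 *		page = ERR_PTR(-ENOMEM);	/* grab failed, report it */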
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return true;

	return try_grab_compound_head(page, 1, flags);
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page:	pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	put_compound_head(compound_head(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);

static inline void compound_range_next(unsigned long i, unsigned long npages,
					struct page **list, struct page **head,
					unsigned int *ntails)
{
	struct page *next, *page;
	unsigned int nr = 1;

	if (i >= npages)
		return;

	next = *list + i;
	page = compound_head(next);
	if (PageCompound(page) && compound_order(page) >= 1)
		nr = min_t(unsigned int,
			   page + compound_nr(page) - next, npages - i);

	*head = page;
	*ntails = nr;
}

#define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
	for (__i = 0, \
	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
	     __i < __npages; __i += __ntails, \
	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))

static inline void compound_next(unsigned long i, unsigned long npages,
				 struct page **list, struct page **head,
				 unsigned int *ntails)
{
	struct page *page;
	unsigned int nr;

	if (i >= npages)
		return;

	page = compound_head(list[i]);
	for (nr = i + 1; nr < npages; nr++) {
		if (compound_head(list[nr]) != page)
			break;
	}

	*head = page;
	*ntails = nr - i;
}

#define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
	for (__i = 0, \
	     compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
	     __i < __npages; __i += __ntails, \
	     compound_next(__i, __npages, __list, &(__head), &(__ntails)))

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
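 *
 * Illustrative pin/use/unpin sequence (a sketch, not from this file; the
 * device DMA step and the "nr"/"user_addr"/"pages" variables are assumed
 * caller context):
 *
 *	nr = pin_user_pages_fast(user_addr, nr_pages, FOLL_WRITE, pages);
 *	if (nr <= 0)
 *		return nr;
 *	... device writes into the pinned pages ...
 *	unpin_user_pages_dirty_lock(pages, nr, true);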
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	for_each_compound_head(index, pages, npages, head, ntails) {
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!PageDirty(head))
			set_page_dirty_lock(head);
		put_compound_head(head, ntails, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	for_each_compound_range(index, &page, npages, head, ntails) {
		if (make_dirty && !PageDirty(head))
			set_page_dirty_lock(head);
		put_compound_head(head, ntails, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	for_each_compound_head(index, pages, npages, head, ntails)
		put_compound_head(head, ntails, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_pages);

/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
	if (!test_bit(MMF_HAS_PINNED, mm_flags))
		set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case).  Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		/*
		 * Should never reach here, if thp migration is not supported;
		 * Otherwise, it must be a thp migration entry.
		 */
		VM_BUG_ON(!thp_migration_supported() ||
			  !is_pmd_migration_entry(pmdval));

		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);

		pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_lock is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT_PMD) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	if (vma_is_secretmem(vma))
		return NULL;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	if (unlikely(!try_grab_page(*page, gup_flags))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_lock must be held on entry.  If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_NOFAULT)
		return -EFAULT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (locked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if (vma_is_secretmem(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
 * released by an up_read().  That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_lock for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
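 *
 * Illustrative use of the recommended get_user_pages() interface instead (a
 * sketch, not from this file; "user_addr", "page" and error handling are
 * assumed caller context):
 *
 *	mmap_read_lock(current->mm);
 *	got = get_user_pages(user_addr, 1, FOLL_WRITE, &page, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (got == 1) {
 *		... use the page contents ...
 *		put_page(page);
 *	}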
 */
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr(start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;

			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, locked);
				if (locked && *locked == 0) {
					/*
					 * We've got a VM_FAULT_RETRY
					 * and we've lost mmap_lock.
					 * We must stop here.
					 */
					BUG_ON(gup_flags & FOLL_NOWAIT);
					goto out;
				}
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(vma, start, &foll_flags, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
 *		does not allow retry. If NULL, the caller must guarantee
 *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it does not
 * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
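 *
 * Illustrative retry pattern (a sketch, not from this file; "uaddr" is an
 * assumed user address and mmap_lock is held for reading around the call):
 *
 *	bool unlocked = false;
 *
 *	ret = fixup_user_fault(mm, uaddr, FAULT_FLAG_WRITE, &unlocked);
 *	if (!ret)
 *		... retry the pagefault_disable()d access that failed ...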
 */
int fixup_user_fault(struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	address = untagged_addr(address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags, NULL);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		mmap_read_lock(mm);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

/*
 * Please note that this function, unlike __get_user_pages will not
 * return 0 for nr_pages > 0 without FOLL_NOWAIT
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;
		lock_dropped = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
		 * by fatal signals, so we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */

		if (fatal_signal_pending(current)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}
	return pages_done;
}

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma_is_accessible(vma))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(mm, start, nr_pages, gup_flags,
				NULL, NULL, locked);
}

/*
 * faultin_vma_page_range() - populate (prefault) page tables inside the
 *			      given VMA range readable/writable
 *
 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
 *
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @write: whether to prefault readable or writable
 * @locked: whether the mmap_lock is still held
 *
 * Returns either number of processed pages in the vma, or a negative error
 * code on error (see __get_user_pages()).
 *
 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
 * covered by the VMA.
 *
 * If @locked is NULL, it may be held for read or write and will be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be released.
 * If it's released, *@locked will be set to 0.
 */
long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, bool write, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
	 *	       the page dirty with FOLL_WRITE -- which doesn't make a
	 *	       difference with !FOLL_FORCE, because the page is writable
	 *	       in the page table.
	 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
	 *		  a poisoned page.
	 * FOLL_POPULATE: Always populate memory with VM_LOCKONFAULT.
	 * !FOLL_FORCE: Require proper access permissions.
	 */
	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_HWPOISON;
	if (write)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want to report -EINVAL instead of -EFAULT for any permission
	 * problems or incompatible mappings.
	 */
	if (check_vma_flags(vma, gup_flags))
		return -EINVAL;

	return __get_user_pages(mm, start, nr_pages, gup_flags,
				NULL, NULL, locked);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_lock must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			mmap_read_lock(mm);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		mmap_read_unlock(mm);
	return ret;	/* 0 or negative error code */
}
#else /* CONFIG_MMU */
static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		struct vm_area_struct **vmas, int *locked,
		unsigned int foll_flags)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	long i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				get_page(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
#endif /* !CONFIG_MMU */

/**
 * fault_in_writeable - fault in userspace address range for writing
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 */
size_t fault_in_writeable(char __user *uaddr, size_t size)
{
	char __user *start = uaddr, *end;

	if (unlikely(size == 0))
		return 0;
	if (!user_write_access_begin(uaddr, size))
		return size;
	if (!PAGE_ALIGNED(uaddr)) {
		unsafe_put_user(0, uaddr, out);
		uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
	}
	end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
	if (unlikely(end < start))
		end = NULL;
	while (uaddr != end) {
		unsafe_put_user(0, uaddr, out);
		uaddr += PAGE_SIZE;
	}

out:
	user_write_access_end();
	if (size > uaddr - start)
		return size - (uaddr - start);
	return 0;
}
EXPORT_SYMBOL(fault_in_writeable);

/*
 * fault_in_safe_writeable - fault in an address range for writing
 * @uaddr: start of address range
 * @size: length of address range
 *
 * Faults in an address range using get_user_pages, i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in the address range aren't in memory.
 *
 * Unlike fault_in_writeable(), this function is non-destructive.
 *
 * Note that we don't pin or otherwise hold the pages referenced that we fault
 * in.  There's no guarantee that they'll stay in memory for any duration of
 * time.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
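 *
 * Illustrative retry loop around a no-fault copy (a sketch, not from this
 * file; copy_to_user_nofault() stands in for whatever atomic copy failed,
 * and "uaddr"/"buf"/"len" are assumed caller context):
 *
 *	while (copy_to_user_nofault(uaddr, buf, len)) {
 *		if (fault_in_safe_writeable(uaddr, len))
 *			return -EFAULT;
 *	}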
1719 */ 1720 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) 1721 { 1722 unsigned long start = (unsigned long)untagged_addr(uaddr); 1723 unsigned long end, nstart, nend; 1724 struct mm_struct *mm = current->mm; 1725 struct vm_area_struct *vma = NULL; 1726 int locked = 0; 1727 1728 nstart = start & PAGE_MASK; 1729 end = PAGE_ALIGN(start + size); 1730 if (end < nstart) 1731 end = 0; 1732 for (; nstart != end; nstart = nend) { 1733 unsigned long nr_pages; 1734 long ret; 1735 1736 if (!locked) { 1737 locked = 1; 1738 mmap_read_lock(mm); 1739 vma = find_vma(mm, nstart); 1740 } else if (nstart >= vma->vm_end) 1741 vma = vma->vm_next; 1742 if (!vma || vma->vm_start >= end) 1743 break; 1744 nend = end ? min(end, vma->vm_end) : vma->vm_end; 1745 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 1746 continue; 1747 if (nstart < vma->vm_start) 1748 nstart = vma->vm_start; 1749 nr_pages = (nend - nstart) / PAGE_SIZE; 1750 ret = __get_user_pages_locked(mm, nstart, nr_pages, 1751 NULL, NULL, &locked, 1752 FOLL_TOUCH | FOLL_WRITE); 1753 if (ret <= 0) 1754 break; 1755 nend = nstart + ret * PAGE_SIZE; 1756 } 1757 if (locked) 1758 mmap_read_unlock(mm); 1759 if (nstart == end) 1760 return 0; 1761 return size - min_t(size_t, nstart - start, size); 1762 } 1763 EXPORT_SYMBOL(fault_in_safe_writeable); 1764 1765 /** 1766 * fault_in_readable - fault in userspace address range for reading 1767 * @uaddr: start of user address range 1768 * @size: size of user address range 1769 * 1770 * Returns the number of bytes not faulted in (like copy_to_user() and 1771 * copy_from_user()). 1772 */ 1773 size_t fault_in_readable(const char __user *uaddr, size_t size) 1774 { 1775 const char __user *start = uaddr, *end; 1776 volatile char c; 1777 1778 if (unlikely(size == 0)) 1779 return 0; 1780 if (!user_read_access_begin(uaddr, size)) 1781 return size; 1782 if (!PAGE_ALIGNED(uaddr)) { 1783 unsafe_get_user(c, uaddr, out); 1784 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr); 1785 } 1786 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size); 1787 if (unlikely(end < start)) 1788 end = NULL; 1789 while (uaddr != end) { 1790 unsafe_get_user(c, uaddr, out); 1791 uaddr += PAGE_SIZE; 1792 } 1793 1794 out: 1795 user_read_access_end(); 1796 (void)c; 1797 if (size > uaddr - start) 1798 return size - (uaddr - start); 1799 return 0; 1800 } 1801 EXPORT_SYMBOL(fault_in_readable); 1802 1803 /** 1804 * get_dump_page() - pin user page in memory while writing it to core dump 1805 * @addr: user address 1806 * 1807 * Returns struct page pointer of user page pinned for dump, 1808 * to be freed afterwards by put_page(). 1809 * 1810 * Returns NULL on any kind of failure - a hole must then be inserted into 1811 * the corefile, to preserve alignment with its headers; and also returns 1812 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - 1813 * allowing a hole to be left in the corefile to save disk space. 1814 * 1815 * Called without mmap_lock (takes and releases the mmap_lock by itself). 1816 */ 1817 #ifdef CONFIG_ELF_CORE 1818 struct page *get_dump_page(unsigned long addr) 1819 { 1820 struct mm_struct *mm = current->mm; 1821 struct page *page; 1822 int locked = 1; 1823 int ret; 1824 1825 if (mmap_read_lock_killable(mm)) 1826 return NULL; 1827 ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked, 1828 FOLL_FORCE | FOLL_DUMP | FOLL_GET); 1829 if (locked) 1830 mmap_read_unlock(mm); 1831 return (ret == 1) ? 
page : NULL; 1832 } 1833 #endif /* CONFIG_ELF_CORE */ 1834 1835 #ifdef CONFIG_MIGRATION 1836 /* 1837 * Check whether all pages are pinnable, if so return number of pages. If some 1838 * pages are not pinnable, migrate them, and unpin all pages. Return zero if 1839 * pages were migrated, or if some pages were not successfully isolated. 1840 * Return negative error if migration fails. 1841 */ 1842 static long check_and_migrate_movable_pages(unsigned long nr_pages, 1843 struct page **pages, 1844 unsigned int gup_flags) 1845 { 1846 unsigned long i; 1847 unsigned long isolation_error_count = 0; 1848 bool drain_allow = true; 1849 LIST_HEAD(movable_page_list); 1850 long ret = 0; 1851 struct page *prev_head = NULL; 1852 struct page *head; 1853 struct migration_target_control mtc = { 1854 .nid = NUMA_NO_NODE, 1855 .gfp_mask = GFP_USER | __GFP_NOWARN, 1856 }; 1857 1858 for (i = 0; i < nr_pages; i++) { 1859 head = compound_head(pages[i]); 1860 if (head == prev_head) 1861 continue; 1862 prev_head = head; 1863 /* 1864 * If we get a movable page, since we are going to be pinning 1865 * these entries, try to move them out if possible. 1866 */ 1867 if (!is_pinnable_page(head)) { 1868 if (PageHuge(head)) { 1869 if (!isolate_huge_page(head, &movable_page_list)) 1870 isolation_error_count++; 1871 } else { 1872 if (!PageLRU(head) && drain_allow) { 1873 lru_add_drain_all(); 1874 drain_allow = false; 1875 } 1876 1877 if (isolate_lru_page(head)) { 1878 isolation_error_count++; 1879 continue; 1880 } 1881 list_add_tail(&head->lru, &movable_page_list); 1882 mod_node_page_state(page_pgdat(head), 1883 NR_ISOLATED_ANON + 1884 page_is_file_lru(head), 1885 thp_nr_pages(head)); 1886 } 1887 } 1888 } 1889 1890 /* 1891 * If list is empty, and no isolation errors, means that all pages are 1892 * in the correct zone. 1893 */ 1894 if (list_empty(&movable_page_list) && !isolation_error_count) 1895 return nr_pages; 1896 1897 if (gup_flags & FOLL_PIN) { 1898 unpin_user_pages(pages, nr_pages); 1899 } else { 1900 for (i = 0; i < nr_pages; i++) 1901 put_page(pages[i]); 1902 } 1903 if (!list_empty(&movable_page_list)) { 1904 ret = migrate_pages(&movable_page_list, alloc_migration_target, 1905 NULL, (unsigned long)&mtc, MIGRATE_SYNC, 1906 MR_LONGTERM_PIN, NULL); 1907 if (ret && !list_empty(&movable_page_list)) 1908 putback_movable_pages(&movable_page_list); 1909 } 1910 1911 return ret > 0 ? -ENOMEM : ret; 1912 } 1913 #else 1914 static long check_and_migrate_movable_pages(unsigned long nr_pages, 1915 struct page **pages, 1916 unsigned int gup_flags) 1917 { 1918 return nr_pages; 1919 } 1920 #endif /* CONFIG_MIGRATION */ 1921 1922 /* 1923 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which 1924 * allows us to process the FOLL_LONGTERM flag. 
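 *
 * In practice a long-term pin (say, memory registered for RDMA or a
 * long-lived DMA mapping) reaches this path via pin_user_pages() with
 * FOLL_LONGTERM set. A rough sketch of such a caller, where "user_addr",
 * "npages" and "pages" are the caller's own, might look like:
 *
 *	mmap_read_lock(current->mm);
 *	pinned = pin_user_pages(user_addr, npages,
 *				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (pinned > 0) {
 *		... hand the pages to the device, and much later ...
 *		unpin_user_pages(pages, pinned);
 *	}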
1925 */ 1926 static long __gup_longterm_locked(struct mm_struct *mm, 1927 unsigned long start, 1928 unsigned long nr_pages, 1929 struct page **pages, 1930 struct vm_area_struct **vmas, 1931 unsigned int gup_flags) 1932 { 1933 unsigned int flags; 1934 long rc; 1935 1936 if (!(gup_flags & FOLL_LONGTERM)) 1937 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, 1938 NULL, gup_flags); 1939 flags = memalloc_pin_save(); 1940 do { 1941 rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, 1942 NULL, gup_flags); 1943 if (rc <= 0) 1944 break; 1945 rc = check_and_migrate_movable_pages(rc, pages, gup_flags); 1946 } while (!rc); 1947 memalloc_pin_restore(flags); 1948 1949 return rc; 1950 } 1951 1952 static bool is_valid_gup_flags(unsigned int gup_flags) 1953 { 1954 /* 1955 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs, 1956 * never directly by the caller, so enforce that with an assertion: 1957 */ 1958 if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) 1959 return false; 1960 /* 1961 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying 1962 * that is, FOLL_LONGTERM is a specific case, more restrictive case of 1963 * FOLL_PIN. 1964 */ 1965 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) 1966 return false; 1967 1968 return true; 1969 } 1970 1971 #ifdef CONFIG_MMU 1972 static long __get_user_pages_remote(struct mm_struct *mm, 1973 unsigned long start, unsigned long nr_pages, 1974 unsigned int gup_flags, struct page **pages, 1975 struct vm_area_struct **vmas, int *locked) 1976 { 1977 /* 1978 * Parts of FOLL_LONGTERM behavior are incompatible with 1979 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on 1980 * vmas. However, this only comes up if locked is set, and there are 1981 * callers that do request FOLL_LONGTERM, but do not set locked. So, 1982 * allow what we can. 1983 */ 1984 if (gup_flags & FOLL_LONGTERM) { 1985 if (WARN_ON_ONCE(locked)) 1986 return -EINVAL; 1987 /* 1988 * This will check the vmas (even if our vmas arg is NULL) 1989 * and return -ENOTSUPP if DAX isn't allowed in this case: 1990 */ 1991 return __gup_longterm_locked(mm, start, nr_pages, pages, 1992 vmas, gup_flags | FOLL_TOUCH | 1993 FOLL_REMOTE); 1994 } 1995 1996 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, 1997 locked, 1998 gup_flags | FOLL_TOUCH | FOLL_REMOTE); 1999 } 2000 2001 /** 2002 * get_user_pages_remote() - pin user pages in memory 2003 * @mm: mm_struct of target mm 2004 * @start: starting user address 2005 * @nr_pages: number of pages from start to pin 2006 * @gup_flags: flags modifying lookup behaviour 2007 * @pages: array that receives pointers to the pages pinned. 2008 * Should be at least nr_pages long. Or NULL, if caller 2009 * only intends to ensure the pages are faulted in. 2010 * @vmas: array of pointers to vmas corresponding to each page. 2011 * Or NULL if the caller does not require them. 2012 * @locked: pointer to lock flag indicating whether lock is held and 2013 * subsequently whether VM_FAULT_RETRY functionality can be 2014 * utilised. Lock must initially be held. 2015 * 2016 * Returns either number of pages pinned (which may be less than the 2017 * number requested), or an error. Details about the return value: 2018 * 2019 * -- If nr_pages is 0, returns 0. 2020 * -- If nr_pages is >0, but no pages were pinned, returns -errno. 2021 * -- If nr_pages is >0, and some pages were pinned, returns the number of 2022 * pages pinned. Again, this may be less than nr_pages. 
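 *
 * Because of the "some pages pinned" case above, callers should be prepared
 * for a short return and release only what was actually pinned, for example
 * (an illustrative sketch, not a required pattern):
 *
 *	pinned = get_user_pages_remote(mm, start, nr_pages, FOLL_WRITE,
 *				       pages, NULL, &locked);
 *	if (pinned < 0)
 *		return pinned;
 *	... use pages[0 .. pinned-1], then put_page() each of them ...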
2023 * 2024 * The caller is responsible for releasing returned @pages, via put_page(). 2025 * 2026 * @vmas are valid only as long as mmap_lock is held. 2027 * 2028 * Must be called with mmap_lock held for read or write. 2029 * 2030 * get_user_pages_remote walks a process's page tables and takes a reference 2031 * to each struct page that each user address corresponds to at a given 2032 * instant. That is, it takes the page that would be accessed if a user 2033 * thread accesses the given user virtual address at that instant. 2034 * 2035 * This does not guarantee that the page exists in the user mappings when 2036 * get_user_pages_remote returns, and there may even be a completely different 2037 * page there in some cases (eg. if mmapped pagecache has been invalidated 2038 * and subsequently re faulted). However it does guarantee that the page 2039 * won't be freed completely. And mostly callers simply care that the page 2040 * contains data that was valid *at some point in time*. Typically, an IO 2041 * or similar operation cannot guarantee anything stronger anyway because 2042 * locks can't be held over the syscall boundary. 2043 * 2044 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page 2045 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must 2046 * be called after the page is finished with, and before put_page is called. 2047 * 2048 * get_user_pages_remote is typically used for fewer-copy IO operations, 2049 * to get a handle on the memory by some means other than accesses 2050 * via the user virtual addresses. The pages may be submitted for 2051 * DMA to devices or accessed via their kernel linear mapping (via the 2052 * kmap APIs). Care should be taken to use the correct cache flushing APIs. 2053 * 2054 * See also get_user_pages_fast, for performance critical applications. 2055 * 2056 * get_user_pages_remote should be phased out in favor of 2057 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing 2058 * should use get_user_pages_remote because it cannot pass 2059 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. 2060 */ 2061 long get_user_pages_remote(struct mm_struct *mm, 2062 unsigned long start, unsigned long nr_pages, 2063 unsigned int gup_flags, struct page **pages, 2064 struct vm_area_struct **vmas, int *locked) 2065 { 2066 if (!is_valid_gup_flags(gup_flags)) 2067 return -EINVAL; 2068 2069 return __get_user_pages_remote(mm, start, nr_pages, gup_flags, 2070 pages, vmas, locked); 2071 } 2072 EXPORT_SYMBOL(get_user_pages_remote); 2073 2074 #else /* CONFIG_MMU */ 2075 long get_user_pages_remote(struct mm_struct *mm, 2076 unsigned long start, unsigned long nr_pages, 2077 unsigned int gup_flags, struct page **pages, 2078 struct vm_area_struct **vmas, int *locked) 2079 { 2080 return 0; 2081 } 2082 2083 static long __get_user_pages_remote(struct mm_struct *mm, 2084 unsigned long start, unsigned long nr_pages, 2085 unsigned int gup_flags, struct page **pages, 2086 struct vm_area_struct **vmas, int *locked) 2087 { 2088 return 0; 2089 } 2090 #endif /* !CONFIG_MMU */ 2091 2092 /** 2093 * get_user_pages() - pin user pages in memory 2094 * @start: starting user address 2095 * @nr_pages: number of pages from start to pin 2096 * @gup_flags: flags modifying lookup behaviour 2097 * @pages: array that receives pointers to the pages pinned. 2098 * Should be at least nr_pages long. Or NULL, if caller 2099 * only intends to ensure the pages are faulted in. 2100 * @vmas: array of pointers to vmas corresponding to each page. 
 *		Or NULL if the caller does not require them.
 *
 * This is the same as get_user_pages_remote(), just with a less-flexible
 * calling convention where we assume that the mm being operated on belongs to
 * the current task, and doesn't allow passing of a locked parameter.  We also
 * obviously don't pass FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	if (!is_valid_gup_flags(gup_flags))
		return -EINVAL;

	return __gup_longterm_locked(current->mm, start, nr_pages,
				     pages, vmas, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);

/**
 * get_user_pages_locked() - variant of get_user_pages()
 *
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @gup_flags: flags modifying lookup behaviour
 * @pages: array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @locked: pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * It is suitable to replace code of the form:
 *
 *	mmap_read_lock(mm);
 *	do_something()
 *	get_user_pages(..., pages, NULL);
 *	mmap_read_unlock(mm);
 *
 * with:
 *
 *	int locked = 1;
 *	mmap_read_lock(mm);
 *	do_something()
 *	get_user_pages_locked(..., pages, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *
 * Using get_user_pages_locked() or get_user_pages_unlocked() lets the page
 * fault paths make better use of the VM_FAULT_RETRY functionality.
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	/*
	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
	 * vmas.  As there are no users of this flag in this call we simply
	 * disallow this option for now.
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
		return -EINVAL;
	/*
	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
	 * never directly by the caller, so enforce that:
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
		return -EINVAL;

	return __get_user_pages_locked(current->mm, start, nr_pages,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace code of the form:
 *
 *	mmap_read_lock(mm);
 *	get_user_pages(..., pages, NULL);
 *	mmap_read_unlock(mm);
 *
 * with:
 *
 *	get_user_pages_unlocked(..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast(), so
 * get_user_pages_fast() should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
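 *
 * A minimal sketch of a call site (illustrative only; "pinned", "pages" and
 * "i" are the caller's own):
 *
 *	pinned = get_user_pages_unlocked(start, nr_pages, pages, FOLL_WRITE);
 *	if (pinned <= 0)
 *		return pinned ? pinned : -EFAULT;
 *	... access the pages, then ...
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);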
2193 */ 2194 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 2195 struct page **pages, unsigned int gup_flags) 2196 { 2197 struct mm_struct *mm = current->mm; 2198 int locked = 1; 2199 long ret; 2200 2201 /* 2202 * FIXME: Current FOLL_LONGTERM behavior is incompatible with 2203 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on 2204 * vmas. As there are no users of this flag in this call we simply 2205 * disallow this option for now. 2206 */ 2207 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) 2208 return -EINVAL; 2209 2210 mmap_read_lock(mm); 2211 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, 2212 &locked, gup_flags | FOLL_TOUCH); 2213 if (locked) 2214 mmap_read_unlock(mm); 2215 return ret; 2216 } 2217 EXPORT_SYMBOL(get_user_pages_unlocked); 2218 2219 /* 2220 * Fast GUP 2221 * 2222 * get_user_pages_fast attempts to pin user pages by walking the page 2223 * tables directly and avoids taking locks. Thus the walker needs to be 2224 * protected from page table pages being freed from under it, and should 2225 * block any THP splits. 2226 * 2227 * One way to achieve this is to have the walker disable interrupts, and 2228 * rely on IPIs from the TLB flushing code blocking before the page table 2229 * pages are freed. This is unsuitable for architectures that do not need 2230 * to broadcast an IPI when invalidating TLBs. 2231 * 2232 * Another way to achieve this is to batch up page table containing pages 2233 * belonging to more than one mm_user, then rcu_sched a callback to free those 2234 * pages. Disabling interrupts will allow the fast_gup walker to both block 2235 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs 2236 * (which is a relatively rare event). The code below adopts this strategy. 2237 * 2238 * Before activating this code, please be aware that the following assumptions 2239 * are currently made: 2240 * 2241 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to 2242 * free pages containing page tables or TLB flushing requires IPI broadcast. 2243 * 2244 * *) ptes can be read atomically by the architecture. 2245 * 2246 * *) access_ok is sufficient to validate userspace address ranges. 2247 * 2248 * The last two assumptions can be relaxed by the addition of helper functions. 2249 * 2250 * This code is based heavily on the PowerPC implementation by Nick Piggin. 2251 */ 2252 #ifdef CONFIG_HAVE_FAST_GUP 2253 2254 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, 2255 unsigned int flags, 2256 struct page **pages) 2257 { 2258 while ((*nr) - nr_start) { 2259 struct page *page = pages[--(*nr)]; 2260 2261 ClearPageReferenced(page); 2262 if (flags & FOLL_PIN) 2263 unpin_user_page(page); 2264 else 2265 put_page(page); 2266 } 2267 } 2268 2269 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL 2270 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, 2271 unsigned int flags, struct page **pages, int *nr) 2272 { 2273 struct dev_pagemap *pgmap = NULL; 2274 int nr_start = *nr, ret = 0; 2275 pte_t *ptep, *ptem; 2276 2277 ptem = ptep = pte_offset_map(&pmd, addr); 2278 do { 2279 pte_t pte = ptep_get_lockless(ptep); 2280 struct page *head, *page; 2281 2282 /* 2283 * Similar to the PMD case below, NUMA hinting must take slow 2284 * path using the pte_protnone check. 
2285 */ 2286 if (pte_protnone(pte)) 2287 goto pte_unmap; 2288 2289 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 2290 goto pte_unmap; 2291 2292 if (pte_devmap(pte)) { 2293 if (unlikely(flags & FOLL_LONGTERM)) 2294 goto pte_unmap; 2295 2296 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); 2297 if (unlikely(!pgmap)) { 2298 undo_dev_pagemap(nr, nr_start, flags, pages); 2299 goto pte_unmap; 2300 } 2301 } else if (pte_special(pte)) 2302 goto pte_unmap; 2303 2304 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 2305 page = pte_page(pte); 2306 2307 head = try_grab_compound_head(page, 1, flags); 2308 if (!head) 2309 goto pte_unmap; 2310 2311 if (unlikely(page_is_secretmem(page))) { 2312 put_compound_head(head, 1, flags); 2313 goto pte_unmap; 2314 } 2315 2316 if (unlikely(pte_val(pte) != pte_val(*ptep))) { 2317 put_compound_head(head, 1, flags); 2318 goto pte_unmap; 2319 } 2320 2321 VM_BUG_ON_PAGE(compound_head(page) != head, page); 2322 2323 /* 2324 * We need to make the page accessible if and only if we are 2325 * going to access its content (the FOLL_PIN case). Please 2326 * see Documentation/core-api/pin_user_pages.rst for 2327 * details. 2328 */ 2329 if (flags & FOLL_PIN) { 2330 ret = arch_make_page_accessible(page); 2331 if (ret) { 2332 unpin_user_page(page); 2333 goto pte_unmap; 2334 } 2335 } 2336 SetPageReferenced(page); 2337 pages[*nr] = page; 2338 (*nr)++; 2339 2340 } while (ptep++, addr += PAGE_SIZE, addr != end); 2341 2342 ret = 1; 2343 2344 pte_unmap: 2345 if (pgmap) 2346 put_dev_pagemap(pgmap); 2347 pte_unmap(ptem); 2348 return ret; 2349 } 2350 #else 2351 2352 /* 2353 * If we can't determine whether or not a pte is special, then fail immediately 2354 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not 2355 * to be special. 2356 * 2357 * For a futex to be placed on a THP tail page, get_futex_key requires a 2358 * get_user_pages_fast_only implementation that can pin pages. Thus it's still 2359 * useful to have gup_huge_pmd even if we can't operate on ptes. 
2360 */ 2361 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, 2362 unsigned int flags, struct page **pages, int *nr) 2363 { 2364 return 0; 2365 } 2366 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ 2367 2368 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 2369 static int __gup_device_huge(unsigned long pfn, unsigned long addr, 2370 unsigned long end, unsigned int flags, 2371 struct page **pages, int *nr) 2372 { 2373 int nr_start = *nr; 2374 struct dev_pagemap *pgmap = NULL; 2375 2376 do { 2377 struct page *page = pfn_to_page(pfn); 2378 2379 pgmap = get_dev_pagemap(pfn, pgmap); 2380 if (unlikely(!pgmap)) { 2381 undo_dev_pagemap(nr, nr_start, flags, pages); 2382 break; 2383 } 2384 SetPageReferenced(page); 2385 pages[*nr] = page; 2386 if (unlikely(!try_grab_page(page, flags))) { 2387 undo_dev_pagemap(nr, nr_start, flags, pages); 2388 break; 2389 } 2390 (*nr)++; 2391 pfn++; 2392 } while (addr += PAGE_SIZE, addr != end); 2393 2394 put_dev_pagemap(pgmap); 2395 return addr == end; 2396 } 2397 2398 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2399 unsigned long end, unsigned int flags, 2400 struct page **pages, int *nr) 2401 { 2402 unsigned long fault_pfn; 2403 int nr_start = *nr; 2404 2405 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); 2406 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) 2407 return 0; 2408 2409 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 2410 undo_dev_pagemap(nr, nr_start, flags, pages); 2411 return 0; 2412 } 2413 return 1; 2414 } 2415 2416 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, 2417 unsigned long end, unsigned int flags, 2418 struct page **pages, int *nr) 2419 { 2420 unsigned long fault_pfn; 2421 int nr_start = *nr; 2422 2423 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); 2424 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) 2425 return 0; 2426 2427 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 2428 undo_dev_pagemap(nr, nr_start, flags, pages); 2429 return 0; 2430 } 2431 return 1; 2432 } 2433 #else 2434 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2435 unsigned long end, unsigned int flags, 2436 struct page **pages, int *nr) 2437 { 2438 BUILD_BUG(); 2439 return 0; 2440 } 2441 2442 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, 2443 unsigned long end, unsigned int flags, 2444 struct page **pages, int *nr) 2445 { 2446 BUILD_BUG(); 2447 return 0; 2448 } 2449 #endif 2450 2451 static int record_subpages(struct page *page, unsigned long addr, 2452 unsigned long end, struct page **pages) 2453 { 2454 int nr; 2455 2456 for (nr = 0; addr != end; addr += PAGE_SIZE) 2457 pages[nr++] = page++; 2458 2459 return nr; 2460 } 2461 2462 #ifdef CONFIG_ARCH_HAS_HUGEPD 2463 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, 2464 unsigned long sz) 2465 { 2466 unsigned long __boundary = (addr + sz) & ~(sz-1); 2467 return (__boundary - 1 < end - 1) ? 
__boundary : end; 2468 } 2469 2470 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, 2471 unsigned long end, unsigned int flags, 2472 struct page **pages, int *nr) 2473 { 2474 unsigned long pte_end; 2475 struct page *head, *page; 2476 pte_t pte; 2477 int refs; 2478 2479 pte_end = (addr + sz) & ~(sz-1); 2480 if (pte_end < end) 2481 end = pte_end; 2482 2483 pte = huge_ptep_get(ptep); 2484 2485 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 2486 return 0; 2487 2488 /* hugepages are never "special" */ 2489 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 2490 2491 head = pte_page(pte); 2492 page = head + ((addr & (sz-1)) >> PAGE_SHIFT); 2493 refs = record_subpages(page, addr, end, pages + *nr); 2494 2495 head = try_grab_compound_head(head, refs, flags); 2496 if (!head) 2497 return 0; 2498 2499 if (unlikely(pte_val(pte) != pte_val(*ptep))) { 2500 put_compound_head(head, refs, flags); 2501 return 0; 2502 } 2503 2504 *nr += refs; 2505 SetPageReferenced(head); 2506 return 1; 2507 } 2508 2509 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr, 2510 unsigned int pdshift, unsigned long end, unsigned int flags, 2511 struct page **pages, int *nr) 2512 { 2513 pte_t *ptep; 2514 unsigned long sz = 1UL << hugepd_shift(hugepd); 2515 unsigned long next; 2516 2517 ptep = hugepte_offset(hugepd, addr, pdshift); 2518 do { 2519 next = hugepte_addr_end(addr, end, sz); 2520 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) 2521 return 0; 2522 } while (ptep++, addr = next, addr != end); 2523 2524 return 1; 2525 } 2526 #else 2527 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, 2528 unsigned int pdshift, unsigned long end, unsigned int flags, 2529 struct page **pages, int *nr) 2530 { 2531 return 0; 2532 } 2533 #endif /* CONFIG_ARCH_HAS_HUGEPD */ 2534 2535 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 2536 unsigned long end, unsigned int flags, 2537 struct page **pages, int *nr) 2538 { 2539 struct page *head, *page; 2540 int refs; 2541 2542 if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) 2543 return 0; 2544 2545 if (pmd_devmap(orig)) { 2546 if (unlikely(flags & FOLL_LONGTERM)) 2547 return 0; 2548 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags, 2549 pages, nr); 2550 } 2551 2552 page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); 2553 refs = record_subpages(page, addr, end, pages + *nr); 2554 2555 head = try_grab_compound_head(pmd_page(orig), refs, flags); 2556 if (!head) 2557 return 0; 2558 2559 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { 2560 put_compound_head(head, refs, flags); 2561 return 0; 2562 } 2563 2564 *nr += refs; 2565 SetPageReferenced(head); 2566 return 1; 2567 } 2568 2569 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, 2570 unsigned long end, unsigned int flags, 2571 struct page **pages, int *nr) 2572 { 2573 struct page *head, *page; 2574 int refs; 2575 2576 if (!pud_access_permitted(orig, flags & FOLL_WRITE)) 2577 return 0; 2578 2579 if (pud_devmap(orig)) { 2580 if (unlikely(flags & FOLL_LONGTERM)) 2581 return 0; 2582 return __gup_device_huge_pud(orig, pudp, addr, end, flags, 2583 pages, nr); 2584 } 2585 2586 page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); 2587 refs = record_subpages(page, addr, end, pages + *nr); 2588 2589 head = try_grab_compound_head(pud_page(orig), refs, flags); 2590 if (!head) 2591 return 0; 2592 2593 if (unlikely(pud_val(orig) != pud_val(*pudp))) { 2594 put_compound_head(head, refs, flags); 2595 return 0; 2596 } 2597 2598 *nr += refs; 2599 
SetPageReferenced(head); 2600 return 1; 2601 } 2602 2603 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, 2604 unsigned long end, unsigned int flags, 2605 struct page **pages, int *nr) 2606 { 2607 int refs; 2608 struct page *head, *page; 2609 2610 if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) 2611 return 0; 2612 2613 BUILD_BUG_ON(pgd_devmap(orig)); 2614 2615 page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); 2616 refs = record_subpages(page, addr, end, pages + *nr); 2617 2618 head = try_grab_compound_head(pgd_page(orig), refs, flags); 2619 if (!head) 2620 return 0; 2621 2622 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { 2623 put_compound_head(head, refs, flags); 2624 return 0; 2625 } 2626 2627 *nr += refs; 2628 SetPageReferenced(head); 2629 return 1; 2630 } 2631 2632 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, 2633 unsigned int flags, struct page **pages, int *nr) 2634 { 2635 unsigned long next; 2636 pmd_t *pmdp; 2637 2638 pmdp = pmd_offset_lockless(pudp, pud, addr); 2639 do { 2640 pmd_t pmd = READ_ONCE(*pmdp); 2641 2642 next = pmd_addr_end(addr, end); 2643 if (!pmd_present(pmd)) 2644 return 0; 2645 2646 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || 2647 pmd_devmap(pmd))) { 2648 /* 2649 * NUMA hinting faults need to be handled in the GUP 2650 * slowpath for accounting purposes and so that they 2651 * can be serialised against THP migration. 2652 */ 2653 if (pmd_protnone(pmd)) 2654 return 0; 2655 2656 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, 2657 pages, nr)) 2658 return 0; 2659 2660 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { 2661 /* 2662 * architecture have different format for hugetlbfs 2663 * pmd format and THP pmd format 2664 */ 2665 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, 2666 PMD_SHIFT, next, flags, pages, nr)) 2667 return 0; 2668 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) 2669 return 0; 2670 } while (pmdp++, addr = next, addr != end); 2671 2672 return 1; 2673 } 2674 2675 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, 2676 unsigned int flags, struct page **pages, int *nr) 2677 { 2678 unsigned long next; 2679 pud_t *pudp; 2680 2681 pudp = pud_offset_lockless(p4dp, p4d, addr); 2682 do { 2683 pud_t pud = READ_ONCE(*pudp); 2684 2685 next = pud_addr_end(addr, end); 2686 if (unlikely(!pud_present(pud))) 2687 return 0; 2688 if (unlikely(pud_huge(pud))) { 2689 if (!gup_huge_pud(pud, pudp, addr, next, flags, 2690 pages, nr)) 2691 return 0; 2692 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { 2693 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, 2694 PUD_SHIFT, next, flags, pages, nr)) 2695 return 0; 2696 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) 2697 return 0; 2698 } while (pudp++, addr = next, addr != end); 2699 2700 return 1; 2701 } 2702 2703 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, 2704 unsigned int flags, struct page **pages, int *nr) 2705 { 2706 unsigned long next; 2707 p4d_t *p4dp; 2708 2709 p4dp = p4d_offset_lockless(pgdp, pgd, addr); 2710 do { 2711 p4d_t p4d = READ_ONCE(*p4dp); 2712 2713 next = p4d_addr_end(addr, end); 2714 if (p4d_none(p4d)) 2715 return 0; 2716 BUILD_BUG_ON(p4d_huge(p4d)); 2717 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { 2718 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, 2719 P4D_SHIFT, next, flags, pages, nr)) 2720 return 0; 2721 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) 2722 
return 0; 2723 } while (p4dp++, addr = next, addr != end); 2724 2725 return 1; 2726 } 2727 2728 static void gup_pgd_range(unsigned long addr, unsigned long end, 2729 unsigned int flags, struct page **pages, int *nr) 2730 { 2731 unsigned long next; 2732 pgd_t *pgdp; 2733 2734 pgdp = pgd_offset(current->mm, addr); 2735 do { 2736 pgd_t pgd = READ_ONCE(*pgdp); 2737 2738 next = pgd_addr_end(addr, end); 2739 if (pgd_none(pgd)) 2740 return; 2741 if (unlikely(pgd_huge(pgd))) { 2742 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags, 2743 pages, nr)) 2744 return; 2745 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { 2746 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, 2747 PGDIR_SHIFT, next, flags, pages, nr)) 2748 return; 2749 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) 2750 return; 2751 } while (pgdp++, addr = next, addr != end); 2752 } 2753 #else 2754 static inline void gup_pgd_range(unsigned long addr, unsigned long end, 2755 unsigned int flags, struct page **pages, int *nr) 2756 { 2757 } 2758 #endif /* CONFIG_HAVE_FAST_GUP */ 2759 2760 #ifndef gup_fast_permitted 2761 /* 2762 * Check if it's allowed to use get_user_pages_fast_only() for the range, or 2763 * we need to fall back to the slow version: 2764 */ 2765 static bool gup_fast_permitted(unsigned long start, unsigned long end) 2766 { 2767 return true; 2768 } 2769 #endif 2770 2771 static int __gup_longterm_unlocked(unsigned long start, int nr_pages, 2772 unsigned int gup_flags, struct page **pages) 2773 { 2774 int ret; 2775 2776 /* 2777 * FIXME: FOLL_LONGTERM does not work with 2778 * get_user_pages_unlocked() (see comments in that function) 2779 */ 2780 if (gup_flags & FOLL_LONGTERM) { 2781 mmap_read_lock(current->mm); 2782 ret = __gup_longterm_locked(current->mm, 2783 start, nr_pages, 2784 pages, NULL, gup_flags); 2785 mmap_read_unlock(current->mm); 2786 } else { 2787 ret = get_user_pages_unlocked(start, nr_pages, 2788 pages, gup_flags); 2789 } 2790 2791 return ret; 2792 } 2793 2794 static unsigned long lockless_pages_from_mm(unsigned long start, 2795 unsigned long end, 2796 unsigned int gup_flags, 2797 struct page **pages) 2798 { 2799 unsigned long flags; 2800 int nr_pinned = 0; 2801 unsigned seq; 2802 2803 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) || 2804 !gup_fast_permitted(start, end)) 2805 return 0; 2806 2807 if (gup_flags & FOLL_PIN) { 2808 seq = raw_read_seqcount(¤t->mm->write_protect_seq); 2809 if (seq & 1) 2810 return 0; 2811 } 2812 2813 /* 2814 * Disable interrupts. The nested form is used, in order to allow full, 2815 * general purpose use of this routine. 2816 * 2817 * With interrupts disabled, we block page table pages from being freed 2818 * from under us. See struct mmu_table_batch comments in 2819 * include/asm-generic/tlb.h for more details. 2820 * 2821 * We do not adopt an rcu_read_lock() here as we also want to block IPIs 2822 * that come from THPs splitting. 2823 */ 2824 local_irq_save(flags); 2825 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); 2826 local_irq_restore(flags); 2827 2828 /* 2829 * When pinning pages for DMA there could be a concurrent write protect 2830 * from fork() via copy_page_range(), in this case always fail fast GUP. 
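 *
 * The read_seqcount_retry() below pairs with the raw_read_seqcount() sample
 * taken before the walk: copy_page_range() bumps mm->write_protect_seq around
 * write-protecting the parent's pages, so a retry means the ptes may have
 * changed under us and the pins taken so far cannot be trusted.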
2831 */ 2832 if (gup_flags & FOLL_PIN) { 2833 if (read_seqcount_retry(¤t->mm->write_protect_seq, seq)) { 2834 unpin_user_pages(pages, nr_pinned); 2835 return 0; 2836 } 2837 } 2838 return nr_pinned; 2839 } 2840 2841 static int internal_get_user_pages_fast(unsigned long start, 2842 unsigned long nr_pages, 2843 unsigned int gup_flags, 2844 struct page **pages) 2845 { 2846 unsigned long len, end; 2847 unsigned long nr_pinned; 2848 int ret; 2849 2850 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | 2851 FOLL_FORCE | FOLL_PIN | FOLL_GET | 2852 FOLL_FAST_ONLY | FOLL_NOFAULT))) 2853 return -EINVAL; 2854 2855 if (gup_flags & FOLL_PIN) 2856 mm_set_has_pinned_flag(¤t->mm->flags); 2857 2858 if (!(gup_flags & FOLL_FAST_ONLY)) 2859 might_lock_read(¤t->mm->mmap_lock); 2860 2861 start = untagged_addr(start) & PAGE_MASK; 2862 len = nr_pages << PAGE_SHIFT; 2863 if (check_add_overflow(start, len, &end)) 2864 return 0; 2865 if (unlikely(!access_ok((void __user *)start, len))) 2866 return -EFAULT; 2867 2868 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); 2869 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) 2870 return nr_pinned; 2871 2872 /* Slow path: try to get the remaining pages with get_user_pages */ 2873 start += nr_pinned << PAGE_SHIFT; 2874 pages += nr_pinned; 2875 ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags, 2876 pages); 2877 if (ret < 0) { 2878 /* 2879 * The caller has to unpin the pages we already pinned so 2880 * returning -errno is not an option 2881 */ 2882 if (nr_pinned) 2883 return nr_pinned; 2884 return ret; 2885 } 2886 return ret + nr_pinned; 2887 } 2888 2889 /** 2890 * get_user_pages_fast_only() - pin user pages in memory 2891 * @start: starting user address 2892 * @nr_pages: number of pages from start to pin 2893 * @gup_flags: flags modifying pin behaviour 2894 * @pages: array that receives pointers to the pages pinned. 2895 * Should be at least nr_pages long. 2896 * 2897 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to 2898 * the regular GUP. 2899 * Note a difference with get_user_pages_fast: this always returns the 2900 * number of pages pinned, 0 if no pages were pinned. 2901 * 2902 * If the architecture does not support this function, simply return with no 2903 * pages pinned. 2904 * 2905 * Careful, careful! COW breaking can go either way, so a non-write 2906 * access can get ambiguous page results. If you call this function without 2907 * 'write' set, you'd better be sure that you're ok with that ambiguity. 2908 */ 2909 int get_user_pages_fast_only(unsigned long start, int nr_pages, 2910 unsigned int gup_flags, struct page **pages) 2911 { 2912 int nr_pinned; 2913 /* 2914 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, 2915 * because gup fast is always a "pin with a +1 page refcount" request. 2916 * 2917 * FOLL_FAST_ONLY is required in order to match the API description of 2918 * this routine: no fall back to regular ("slow") GUP. 2919 */ 2920 gup_flags |= FOLL_GET | FOLL_FAST_ONLY; 2921 2922 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags, 2923 pages); 2924 2925 /* 2926 * As specified in the API description above, this routine is not 2927 * allowed to return negative values. However, the common core 2928 * routine internal_get_user_pages_fast() *can* return -errno. 
2929 * Therefore, correct for that here: 2930 */ 2931 if (nr_pinned < 0) 2932 nr_pinned = 0; 2933 2934 return nr_pinned; 2935 } 2936 EXPORT_SYMBOL_GPL(get_user_pages_fast_only); 2937 2938 /** 2939 * get_user_pages_fast() - pin user pages in memory 2940 * @start: starting user address 2941 * @nr_pages: number of pages from start to pin 2942 * @gup_flags: flags modifying pin behaviour 2943 * @pages: array that receives pointers to the pages pinned. 2944 * Should be at least nr_pages long. 2945 * 2946 * Attempt to pin user pages in memory without taking mm->mmap_lock. 2947 * If not successful, it will fall back to taking the lock and 2948 * calling get_user_pages(). 2949 * 2950 * Returns number of pages pinned. This may be fewer than the number requested. 2951 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns 2952 * -errno. 2953 */ 2954 int get_user_pages_fast(unsigned long start, int nr_pages, 2955 unsigned int gup_flags, struct page **pages) 2956 { 2957 if (!is_valid_gup_flags(gup_flags)) 2958 return -EINVAL; 2959 2960 /* 2961 * The caller may or may not have explicitly set FOLL_GET; either way is 2962 * OK. However, internally (within mm/gup.c), gup fast variants must set 2963 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" 2964 * request. 2965 */ 2966 gup_flags |= FOLL_GET; 2967 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); 2968 } 2969 EXPORT_SYMBOL_GPL(get_user_pages_fast); 2970 2971 /** 2972 * pin_user_pages_fast() - pin user pages in memory without taking locks 2973 * 2974 * @start: starting user address 2975 * @nr_pages: number of pages from start to pin 2976 * @gup_flags: flags modifying pin behaviour 2977 * @pages: array that receives pointers to the pages pinned. 2978 * Should be at least nr_pages long. 2979 * 2980 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See 2981 * get_user_pages_fast() for documentation on the function arguments, because 2982 * the arguments here are identical. 2983 * 2984 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 2985 * see Documentation/core-api/pin_user_pages.rst for further details. 2986 */ 2987 int pin_user_pages_fast(unsigned long start, int nr_pages, 2988 unsigned int gup_flags, struct page **pages) 2989 { 2990 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 2991 if (WARN_ON_ONCE(gup_flags & FOLL_GET)) 2992 return -EINVAL; 2993 2994 gup_flags |= FOLL_PIN; 2995 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); 2996 } 2997 EXPORT_SYMBOL_GPL(pin_user_pages_fast); 2998 2999 /* 3000 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior 3001 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET. 3002 * 3003 * The API rules are the same, too: no negative values may be returned. 3004 */ 3005 int pin_user_pages_fast_only(unsigned long start, int nr_pages, 3006 unsigned int gup_flags, struct page **pages) 3007 { 3008 int nr_pinned; 3009 3010 /* 3011 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API 3012 * rules require returning 0, rather than -errno: 3013 */ 3014 if (WARN_ON_ONCE(gup_flags & FOLL_GET)) 3015 return 0; 3016 /* 3017 * FOLL_FAST_ONLY is required in order to match the API description of 3018 * this routine: no fall back to regular ("slow") GUP. 
3019 */ 3020 gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY); 3021 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags, 3022 pages); 3023 /* 3024 * This routine is not allowed to return negative values. However, 3025 * internal_get_user_pages_fast() *can* return -errno. Therefore, 3026 * correct for that here: 3027 */ 3028 if (nr_pinned < 0) 3029 nr_pinned = 0; 3030 3031 return nr_pinned; 3032 } 3033 EXPORT_SYMBOL_GPL(pin_user_pages_fast_only); 3034 3035 /** 3036 * pin_user_pages_remote() - pin pages of a remote process 3037 * 3038 * @mm: mm_struct of target mm 3039 * @start: starting user address 3040 * @nr_pages: number of pages from start to pin 3041 * @gup_flags: flags modifying lookup behaviour 3042 * @pages: array that receives pointers to the pages pinned. 3043 * Should be at least nr_pages long. Or NULL, if caller 3044 * only intends to ensure the pages are faulted in. 3045 * @vmas: array of pointers to vmas corresponding to each page. 3046 * Or NULL if the caller does not require them. 3047 * @locked: pointer to lock flag indicating whether lock is held and 3048 * subsequently whether VM_FAULT_RETRY functionality can be 3049 * utilised. Lock must initially be held. 3050 * 3051 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See 3052 * get_user_pages_remote() for documentation on the function arguments, because 3053 * the arguments here are identical. 3054 * 3055 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3056 * see Documentation/core-api/pin_user_pages.rst for details. 3057 */ 3058 long pin_user_pages_remote(struct mm_struct *mm, 3059 unsigned long start, unsigned long nr_pages, 3060 unsigned int gup_flags, struct page **pages, 3061 struct vm_area_struct **vmas, int *locked) 3062 { 3063 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 3064 if (WARN_ON_ONCE(gup_flags & FOLL_GET)) 3065 return -EINVAL; 3066 3067 gup_flags |= FOLL_PIN; 3068 return __get_user_pages_remote(mm, start, nr_pages, gup_flags, 3069 pages, vmas, locked); 3070 } 3071 EXPORT_SYMBOL(pin_user_pages_remote); 3072 3073 /** 3074 * pin_user_pages() - pin user pages in memory for use by other devices 3075 * 3076 * @start: starting user address 3077 * @nr_pages: number of pages from start to pin 3078 * @gup_flags: flags modifying lookup behaviour 3079 * @pages: array that receives pointers to the pages pinned. 3080 * Should be at least nr_pages long. Or NULL, if caller 3081 * only intends to ensure the pages are faulted in. 3082 * @vmas: array of pointers to vmas corresponding to each page. 3083 * Or NULL if the caller does not require them. 3084 * 3085 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and 3086 * FOLL_PIN is set. 3087 * 3088 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please 3089 * see Documentation/core-api/pin_user_pages.rst for details. 3090 */ 3091 long pin_user_pages(unsigned long start, unsigned long nr_pages, 3092 unsigned int gup_flags, struct page **pages, 3093 struct vm_area_struct **vmas) 3094 { 3095 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 3096 if (WARN_ON_ONCE(gup_flags & FOLL_GET)) 3097 return -EINVAL; 3098 3099 gup_flags |= FOLL_PIN; 3100 return __gup_longterm_locked(current->mm, start, nr_pages, 3101 pages, vmas, gup_flags); 3102 } 3103 EXPORT_SYMBOL(pin_user_pages); 3104 3105 /* 3106 * pin_user_pages_unlocked() is the FOLL_PIN variant of 3107 * get_user_pages_unlocked(). 
Behavior is the same, except that this one sets 3108 * FOLL_PIN and rejects FOLL_GET. 3109 */ 3110 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 3111 struct page **pages, unsigned int gup_flags) 3112 { 3113 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 3114 if (WARN_ON_ONCE(gup_flags & FOLL_GET)) 3115 return -EINVAL; 3116 3117 gup_flags |= FOLL_PIN; 3118 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); 3119 } 3120 EXPORT_SYMBOL(pin_user_pages_unlocked); 3121 3122 /* 3123 * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked(). 3124 * Behavior is the same, except that this one sets FOLL_PIN and rejects 3125 * FOLL_GET. 3126 */ 3127 long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, 3128 unsigned int gup_flags, struct page **pages, 3129 int *locked) 3130 { 3131 /* 3132 * FIXME: Current FOLL_LONGTERM behavior is incompatible with 3133 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on 3134 * vmas. As there are no users of this flag in this call we simply 3135 * disallow this option for now. 3136 */ 3137 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) 3138 return -EINVAL; 3139 3140 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 3141 if (WARN_ON_ONCE(gup_flags & FOLL_GET)) 3142 return -EINVAL; 3143 3144 gup_flags |= FOLL_PIN; 3145 return __get_user_pages_locked(current->mm, start, nr_pages, 3146 pages, NULL, locked, 3147 gup_flags | FOLL_TOUCH); 3148 } 3149 EXPORT_SYMBOL(pin_user_pages_locked); 3150
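
/*
 * Usage sketch for the FOLL_PIN interfaces exported above (illustrative only;
 * "user_addr", "npages" and "pages" are the caller's own, and real callers
 * must follow Documentation/core-api/pin_user_pages.rst): pin, access or DMA,
 * mark dirty if written, then unpin:
 *
 *	pinned = pin_user_pages_fast(user_addr, npages, FOLL_WRITE, pages);
 *	if (pinned <= 0)
 *		return pinned;
 *	... set up and run DMA into the pinned pages ...
 *	unpin_user_pages_dirty_lock(pages, pinned, true);
 */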