#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}
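
/*
 * Illustrative sketch (not part of this file's API): most callers do not
 * use follow_page_mask() directly but go through the follow_page() wrapper
 * from <linux/mm.h>, with mmap_sem held for read.  Assuming "mm" and "addr"
 * are supplied by the caller, a single-page lookup could look roughly like
 * this; the FOLL_GET reference is dropped with put_page() once the caller
 * is done with the page:
 *
 *	struct vm_area_struct *vma;
 *	struct page *page = NULL;
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && addr >= vma->vm_start)
 *		page = follow_page(vma, addr, FOLL_GET);
 *	up_read(&mm->mmap_sem);
 *	if (page && !IS_ERR(page))
 *		put_page(page);
 */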

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry. If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* For mm_populate(), just skip the stack guard page. */
	if ((*flags & FOLL_POPULATE) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0. Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released. Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);
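
/*
 * Illustrative sketch (not from the original file; "tsk", "mm", "start",
 * "nr_pages" and "pages" are assumed to be supplied by the caller): with
 * the @nonblocking protocol described above, a caller that can tolerate
 * mmap_sem being dropped might do roughly the following, only unlocking
 * when *nonblocking is still set on return:
 *
 *	int locked = 1;
 *	long ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = __get_user_pages(tsk, mm, start, nr_pages,
 *			       FOLL_TOUCH | FOLL_GET, pages, NULL, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 *
 * __get_user_pages_locked() below wraps exactly this kind of loop and also
 * retries after VM_FAULT_RETRY, so it is normally preferred.
 */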

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
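
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * typical pattern is an access attempted with page faults disabled, with
 * fixup_user_fault() used to resolve the fault before retrying, much like
 * the futex code does.  Assuming "uaddr" is a user pointer and "val" a
 * kernel variable of matching type:
 *
 *	while (1) {
 *		int ret;
 *
 *		pagefault_disable();
 *		ret = __copy_from_user_inatomic(&val, uaddr, sizeof(val));
 *		pagefault_enable();
 *		if (!ret)
 *			break;
 *		down_read(&current->mm->mmap_sem);
 *		ret = fixup_user_fault(current, current->mm,
 *				       (unsigned long)uaddr, 0);
 *		up_read(&current->mm->mmap_sem);
 *		if (ret)
 *			return ret;
 *	}
 */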

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						int write, int force,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 * to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, NULL, locked, true, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows the
 * caller to pass additional gup_flags as the last parameter (like
 * FOLL_HWPOISON).
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET",
 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 * according to the parameters "pages", "write", "force"
 * respectively.
 */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
					       unsigned long start, unsigned long nr_pages,
					       int write, int force, struct page **pages,
					       unsigned int gup_flags)
{
	long ret;
	int locked = 1;
	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				      pages, NULL, &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 * with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast, so
 * get_user_pages_fast should be used instead whenever the two parameters
 * "tsk" and "mm" are respectively equal to current and current->mm.
 * get_user_pages_unlocked() is still needed when "force" must be set to 1,
 * since get_user_pages_fast lacks the "force" parameter.
 */
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
					 force, pages, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
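
/*
 * Illustrative sketch (hypothetical caller): pinning a user buffer without
 * managing mmap_sem explicitly, then releasing the pages.  "buf" and
 * "npages" are assumed to come from the caller, and "pages" to be an array
 * of at least "npages" entries; mmap_sem must not already be held:
 *
 *	long got, i;
 *
 *	got = get_user_pages_unlocked(current, current->mm,
 *				      (unsigned long)buf & PAGE_MASK,
 *				      npages, 0, 0, pages);
 *	if (got < 0)
 *		return got;
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */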

/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, vmas, NULL, false, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
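
/*
 * Illustrative sketch (hypothetical caller): the classic pattern described
 * above, pinning one page for writing, touching it through the kernel
 * mapping and then releasing it.  "task", "mm" and "addr" are assumed to be
 * provided by the caller, and mmap_sem to be held for read:
 *
 *	struct page *page;
 *	long ret;
 *
 *	ret = get_user_pages(task, mm, addr, 1, 1, 0, &page, NULL);
 *	if (ret < 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	memset(kmap(page), 0, PAGE_SIZE);
 *	kunmap(page);
 *	set_page_dirty_lock(page);
 *	put_page(page);
 */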

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking:
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released. If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}
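
/*
 * Illustrative note (helper lives in <linux/mm.h>, reproduced here from
 * memory and possibly simplified): mmap(MAP_POPULATE / MAP_LOCKED) and
 * mlock() reach __mm_populate() through the mm_populate() wrapper, which
 * simply ignores errors:
 *
 *	static inline void mm_populate(unsigned long addr, unsigned long len)
 *	{
 *		(void) __mm_populate(addr, len, 1);
 *	}
 */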

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *      pages containing page tables.
 *
 *  *) THP splits will broadcast an IPI, this can be achieved by overriding
 *      pmdp_splitting_flush.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = READ_ONCE(*ptep);
		struct page *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (!pte_present(pte) || pte_special(pte) ||
			pte_protnone(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		if (!page_cache_get_speculative(page))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			goto pte_unmap;
		}

		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return. (This allows the THP code to bump their ref count when
	 * they are split into base pages).
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page, *tail;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
					  pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * architectures can have a different format for the
			 * hugetlbfs pmd and the THP pmd
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts. We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
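
/*
 * Illustrative sketch (hypothetical caller): because __get_user_pages_fast()
 * never sleeps and never takes mmap_sem, it may be called from contexts that
 * cannot block, much as get_futex_key() does.  Assuming "address" is a
 * page-aligned user address:
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(address, 1, 1, &page) != 1)
 *		return -EFAULT;
 *	put_page(page);
 *
 * The reference obtained must still be dropped with put_page() once the
 * caller has finished inspecting the page.
 */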

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(current, mm, start,
					      nr_pages - nr, write, 0, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
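
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * get_user_pages_fast() is the usual entry point when the target is the
 * current task's own address space.  Pinning a user buffer that the kernel
 * will write into, then dirtying and releasing the pages, might look like:
 *
 *	int i, got;
 *
 *	got = get_user_pages_fast((unsigned long)ubuf & PAGE_MASK,
 *				  nr_pages, 1, pages);
 *	if (got < 0)
 *		return got;
 *	for (i = 0; i < got; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 *
 * "ubuf", "nr_pages" and "pages" are assumed to come from the caller.
 */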