#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap = NULL;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
		if (pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		get_page(page);

		/* drop the pgmap reference now that we hold the page */
		if (pgmap) {
			put_dev_pagemap(pgmap);
			pgmap = NULL;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags, unsigned int *page_mask)
{
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(*pmd)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(*pmd)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(*pmd));
		if (is_pmd_migration_entry(*pmd))
			pmd_migration_entry_wait(mm, pmd);
		goto retry;
	}
	if (pmd_devmap(*pmd)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(*pmd)))
		return follow_page_pte(vma, address, pmd, flags);

	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	*page_mask = HPAGE_PMD_NR - 1;
	return page;
}


static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags, unsigned int *page_mask)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, page_mask);
}


static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags, unsigned int *page_mask)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, page_mask);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	int ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section), the access returns -EFAULT, and we want to
 * resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem.  So it does not
 * have the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	int ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked, true,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows for
 * tsk, mm to be specified.
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET"
 * is set implicitly if "pages" is non-NULL.
 */
static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		unsigned int gup_flags)
{
	long ret;
	int locked = 1;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
				      &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked, true,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's and don't allow
 * passing of a locked parameter.  We also obviously don't pass
 * FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL, false,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking:
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up the page table pages belonging
 * to more than one mm_user, then rcu_sched a callback to free those pages.
 * Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 *     free pages containing page tables or TLB flushing requires IPI broadcast.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_GUP

#ifndef gup_get_pte
/*
 * We assume that the PTE can be read atomically. If this is not the case for
 * your architecture, please provide the helper.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, write))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);

		if (!page_cache_get_speculative(head))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);

		put_dev_pagemap(pgmap);
		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		put_dev_pagemap(pgmap);
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
	return 1;
}

static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;

	fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
}

static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;

	fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
}
#else
static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}

static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}
#endif

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pmd_access_permitted(orig, write))
		return 0;

	if (pmd_devmap(orig))
		return __gup_device_huge_pmd(orig, addr, end, pages, nr);

	refs = 0;
	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pmd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pud_access_permitted(orig, write))
		return 0;

	if (pud_devmap(orig))
		return __gup_device_huge_pud(orig, addr, end, pages, nr);

	refs = 0;
	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pud_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (!pgd_access_permitted(orig, write))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));
	refs = 0;
	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pgd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
					  pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures can have a different pmd format for
			 * hugetlbfs than for THP.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset(&pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use __get_user_pages_fast() for the range, or
 * we need to fall back to the slow version:
 */
bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
{
	unsigned long len, end;

	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	return end >= start;
}
#endif

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr = 0, ret = 0;

	start &= PAGE_MASK;

	if (gup_fast_permitted(start, nr_pages, write)) {
		nr = __get_user_pages_fast(start, nr_pages, write, pages);
		ret = nr;
	}

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
					      write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_GUP */
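
/*
 * Illustrative sketch (not part of this file's code): one plausible caller-side
 * pattern for get_user_pages_fast(), following the rules documented above --
 * check how many pages were actually pinned, and, for a write (write = 1) pin,
 * call set_page_dirty_lock() on each page before dropping the reference with
 * put_page().  The function and variable names below (pin_user_buffer, uaddr,
 * npages) are hypothetical and only shown for illustration.
 *
 *	static long pin_user_buffer(unsigned long uaddr, int npages,
 *				    struct page **pages)
 *	{
 *		int pinned = get_user_pages_fast(uaddr, npages, 1, pages);
 *		int i;
 *
 *		if (pinned < npages) {
 *			for (i = 0; i < pinned; i++)
 *				put_page(pages[i]);
 *			return pinned < 0 ? pinned : -EFAULT;
 *		}
 *
 *		// ... access the pages here, e.g. via kmap() or DMA ...
 *
 *		for (i = 0; i < pinned; i++) {
 *			set_page_dirty_lock(pages[i]);
 *			put_page(pages[i]);
 *		}
 *		return 0;
 *	}
 */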