// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/mmu_context.h>
#include <linux/swap.h>
#include <linux/leafops.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * We want to know the real level where an entry is located, ignoring any
 * folding of levels which may be happening. For example, if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		if (ops->install_pte && pte_none(ptep_get(pte))) {
			pte_t new_pte;

			err = ops->install_pte(addr, addr + PAGE_SIZE, &new_pte,
					       walk);
			if (err)
				break;

			set_pte_at(walk->mm, addr, pte, new_pte);
			/* Non-present before, so for arches that need it. */
			if (!WARN_ON_ONCE(walk->no_vma))
				update_mmu_cache(walk->vma, addr, pte);
		} else {
			err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
			if (err)
				break;
		}
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		/*
		 * pte_offset_map() might apply user-specific validation.
		 * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
		 * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear),
		 * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
		 */
		if (walk->mm == &init_mm || addr >= TASK_SIZE)
			pte = pte_offset_kernel(pmd, addr);
		else
			pte = pte_offset_map(pmd, addr);
		if (pte) {
			err = walk_pte_range_inner(pte, addr, end, walk);
			if (walk->mm != &init_mm && addr < TASK_SIZE)
				pte_unmap(pte);
		}
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (pte) {
			err = walk_pte_range_inner(pte, addr, end, walk);
			pte_unmap_unlock(pte, ptl);
		}
	}
	if (!pte)
		walk->action = ACTION_AGAIN;
	return err;
}
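
/*
 * Example (an illustrative sketch only, not built as part of this file): a
 * minimal ->pte_entry callback that counts present PTEs. The callback name is
 * hypothetical. For a VMA walk, walk_pte_range() invokes this with the PTE
 * page table lock held, so the callback must not sleep:
 *
 *	static int count_pte_entry(pte_t *pte, unsigned long addr,
 *				   unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(ptep_get(pte)))
 *			(*nr_present)++;
 *		return 0;
 *	}
 */
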
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t pudval = pudp_get(pud);
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	bool has_handler = ops->pte_entry;
	bool has_install = ops->install_pte;
	int err = 0;
	int depth = real_depth(3);

	/*
	 * For PTE handling, pte_offset_map_lock() takes care of checking
	 * whether there actually is a page table. But it also has to be
	 * very careful about concurrent page table reclaim.
	 *
	 * Similarly, we have to be careful here - a PUD entry that points
	 * to a PMD table cannot go away, so we can just walk it. But if
	 * it's something else, we need to ensure we didn't race something,
	 * so we need to retry.
	 *
	 * A pertinent example of this is a PUD refault after a PUD split -
	 * we will need to split again or risk accessing invalid memory.
	 */
	if (!pud_present(pudval) || pud_leaf(pudval)) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (has_install)
				err = __pte_alloc(walk->mm, pmd);
			else if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			if (!has_install)
				continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;
		if (walk->action == ACTION_CONTINUE)
			continue;

		if (!has_handler) { /* No handlers for lower page tables. */
			if (!has_install)
				continue; /* Nothing to do. */
			/*
			 * We are ONLY installing, so avoid unnecessarily
			 * splitting a present huge page.
			 */
			if (pmd_present(*pmd) && pmd_trans_huge(*pmd))
				continue;
		}

		if (walk->vma)
			split_huge_pmd(walk->vma, pmd, addr);
		else if (pmd_leaf(*pmd) || !pmd_present(*pmd))
			continue; /* Nothing to do. */

		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

	} while (pmd++, addr = next, addr != end);

	return err;
}
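
/*
 * Example (an illustrative sketch, hypothetical name): as the comment above
 * notes, a ->pmd_entry() handler must expect pmd_trans_huge() pmds. A walker
 * that only cares about PTE-mapped memory can skip a huge pmd by setting
 * ACTION_CONTINUE; otherwise, for a VMA walk with a ->pte_entry handler, a
 * plain return 0 leads to split_huge_pmd() and a PTE-level walk:
 *
 *	static int skip_thp_pmd_entry(pmd_t *pmd, unsigned long addr,
 *				      unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pmd_trans_huge(pmdp_get(pmd)))
 *			walk->action = ACTION_CONTINUE;
 *		return 0;
 *	}
 */
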
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	bool has_handler = ops->pmd_entry || ops->pte_entry;
	bool has_install = ops->install_pte;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (has_install)
				err = __pmd_alloc(walk->mm, pud, addr);
			else if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			if (!has_install)
				continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;
		if (walk->action == ACTION_CONTINUE)
			continue;

		if (!has_handler) { /* No handlers for lower page tables. */
			if (!has_install)
				continue; /* Nothing to do. */
			/*
			 * We are ONLY installing, so avoid unnecessarily
			 * splitting a present huge page.
			 */
			if (pud_present(*pud) && pud_trans_huge(*pud))
				continue;
		}

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		else if (pud_leaf(*pud) || !pud_present(*pud))
			continue; /* Nothing to do. */

		err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	bool has_handler = ops->pud_entry || ops->pmd_entry || ops->pte_entry;
	bool has_install = ops->install_pte;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (has_install)
				err = __pud_alloc(walk->mm, p4d, addr);
			else if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			if (!has_install)
				continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (has_handler || has_install)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	bool has_handler = ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
		ops->pte_entry;
	bool has_install = ops->install_pte;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (has_install)
				err = __p4d_alloc(walk->mm, pgd, addr);
			else if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			if (!has_install)
				continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (has_handler || has_install)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

	return min(boundary, end);
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	hugetlb_vma_lock_read(vma);
	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = hugetlb_walk(vma, addr & hmask, sz);
		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);
		if (err)
			break;
	} while (addr = next, addr != end);
	hugetlb_vma_unlock_read(vma);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole ranges and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}
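
/*
 * Example (an illustrative sketch, hypothetical name): a ->test_walk()
 * callback implementing the policy described above, walking only anonymous
 * VMAs and skipping everything else:
 *
 *	static int anon_only_test_walk(unsigned long start, unsigned long end,
 *				       struct mm_walk *walk)
 *	{
 *		return vma_is_anonymous(walk->vma) ? 0 : 1;
 *	}
 */
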
static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;
	bool is_hugetlb = is_vm_hugetlb_page(vma);

	/* We do not support hugetlb PTE installation. */
	if (ops->install_pte && is_hugetlb)
		return -EINVAL;

	if (ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (is_hugetlb) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (ops->post_vma)
		ops->post_vma(walk);

	return err;
}

static inline void process_mm_walk_lock(struct mm_struct *mm,
					enum page_walk_lock walk_lock)
{
	if (walk_lock == PGWALK_RDLOCK)
		mmap_assert_locked(mm);
	else if (walk_lock != PGWALK_VMA_RDLOCK_VERIFY)
		mmap_assert_write_locked(mm);
}

static inline void process_vma_walk_lock(struct vm_area_struct *vma,
					 enum page_walk_lock walk_lock)
{
#ifdef CONFIG_PER_VMA_LOCK
	switch (walk_lock) {
	case PGWALK_WRLOCK:
		vma_start_write(vma);
		break;
	case PGWALK_WRLOCK_VERIFY:
		vma_assert_write_locked(vma);
		break;
	case PGWALK_VMA_RDLOCK_VERIFY:
		vma_assert_locked(vma);
		break;
	case PGWALK_RDLOCK:
		/* PGWALK_RDLOCK is handled by process_mm_walk_lock */
		break;
	}
#endif
}
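
/*
 * Example (an illustrative sketch, hypothetical names): @walk_lock in the ops
 * declares what the helpers above may assert. A walker that writes to page
 * table entries should pair PGWALK_WRLOCK with the mmap write lock, so each
 * VMA is also write-locked as it is visited:
 *
 *	static const struct mm_walk_ops write_ops = {
 *		.pmd_entry	= modify_pmd_entry,
 *		.walk_lock	= PGWALK_WRLOCK,
 *	};
 *
 *	mmap_write_lock(mm);
 *	err = walk_page_range(mm, start, end, &write_ops, NULL);
 *	mmap_write_unlock(mm);
 */
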
/*
 * See the comment for walk_page_range(); this performs the heavy lifting of
 * the operation, but sets no restrictions on how the walk proceeds.
 *
 * We usually restrict the ability to install PTEs, but this functionality is
 * available to internal memory management code and provided in mm/internal.h.
 */
int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
			      unsigned long end, const struct mm_walk_ops *ops,
			      void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else { /* inside vma */
			process_vma_walk_lock(vma, ops->walk_lock);
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = find_vma(mm, vma->vm_end);

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
			err = __walk_page_range(start, next, &walk);
		}
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
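
/*
 * Example (an illustrative sketch, hypothetical names, modelled on the guard
 * region use case): internal MM code may provide ->install_pte to have the
 * walker populate pte_none() entries. The callback only composes the PTE
 * value; the walker itself performs the set_pte_at():
 *
 *	static int install_guard_pte(unsigned long addr, unsigned long next,
 *				     pte_t *ptep, struct mm_walk *walk)
 *	{
 *		*ptep = make_pte_marker(PTE_MARKER_GUARD);
 *		return 0;
 *	}
 *
 *	static const struct mm_walk_ops guard_ops = {
 *		.install_pte	= install_guard_pte,
 *		.walk_lock	= PGWALK_RDLOCK,
 *	};
 *
 *	err = walk_page_range_mm_unsafe(mm, start, end, &guard_ops, NULL);
 */
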
/*
 * Determine if the walk operations specified are permitted to be used for a
 * page table walk.
 *
 * This check is performed on all functions which are parameterised by walk
 * operations and exposed in include/linux/pagewalk.h.
 *
 * Internal memory management code can use *_unsafe() functions to be able to
 * use all page walking operations.
 */
static bool check_ops_safe(const struct mm_walk_ops *ops)
{
	/*
	 * The installation of PTEs is solely under the control of memory
	 * management logic and subject to many subtle locking, security and
	 * cache considerations, so we cannot permit other users to do so, and
	 * certainly not for exported symbols.
	 */
	if (ops->install_pte)
		return false;

	return true;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm: mm_struct representing the target process of page table walk
 * @start: start address of the virtual address range
 * @end: end address of the virtual address range
 * @ops: operation to call during the walk
 * @private: private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : succeeded in handling the current entry; if you haven't reached
 *         the end address yet, continue the walk.
 *  - >0 : succeeded in handling the current entry, and return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with an error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		    unsigned long end, const struct mm_walk_ops *ops,
		    void *private)
{
	if (!check_ops_safe(ops))
		return -EINVAL;

	return walk_page_range_mm_unsafe(mm, start, end, ops, private);
}
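
/*
 * Example (an illustrative sketch, hypothetical names): a typical read-only
 * walk, combining a ->pte_entry callback such as count_pte_entry() sketched
 * earlier with PGWALK_RDLOCK under the mmap read lock:
 *
 *	static const struct mm_walk_ops count_ops = {
 *		.pte_entry	= count_pte_entry,
 *		.walk_lock	= PGWALK_RDLOCK,
 *	};
 *
 *	unsigned long nr_present = 0;
 *
 *	mmap_read_lock(mm);
 *	err = walk_page_range(mm, start, end, &count_ops, &nr_present);
 *	mmap_read_unlock(mm);
 */
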
/**
 * walk_kernel_page_table_range - walk a range of kernel page tables.
 * @start: start address of the virtual address range
 * @end: end address of the virtual address range
 * @ops: operation to call during the walk
 * @pgd: pgd to walk if different from mm->pgd
 * @private: private data for callbacks' usage
 *
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking kernel page tables or page tables for firmware.
 *
 * Note: Be careful when walking kernel page tables. The caller may need to
 * take other effective measures (the mmap lock may be insufficient) to prevent
 * the intermediate kernel page tables belonging to the specified address range
 * from being freed (e.g. by memory hot-remove).
 */
int walk_kernel_page_table_range(unsigned long start, unsigned long end,
		const struct mm_walk_ops *ops, pgd_t *pgd, void *private)
{
	/*
	 * Kernel intermediate page tables are usually not freed, so the mmap
	 * read lock is sufficient. But there are some exceptions, e.g. memory
	 * hot-remove, in which case the mmap lock is insufficient to prevent
	 * the intermediate kernel page tables belonging to the specified
	 * address range from being freed. The caller should take other actions
	 * to prevent this race.
	 */
	mmap_assert_locked(&init_mm);

	return walk_kernel_page_table_range_lockless(start, end, ops, pgd,
						     private);
}

/*
 * Use this function to walk the kernel page tables locklessly. The caller must
 * guarantee exclusive access over the range they are operating on - that is,
 * there must be no concurrent access, for example somebody concurrently
 * changing permissions on vmalloc objects.
 */
int walk_kernel_page_table_range_lockless(unsigned long start, unsigned long end,
		const struct mm_walk_ops *ops, pgd_t *pgd, void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= &init_mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end)
		return -EINVAL;
	if (!check_ops_safe(ops))
		return -EINVAL;

	return walk_pgd_range(start, end, &walk);
}
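
/*
 * Example (an illustrative sketch, hypothetical ops name): dumping the vmalloc
 * area. With walk.no_vma set, pte_entry() runs without the PTE lock, and a
 * NULL @pgd walks init_mm's own page tables:
 *
 *	mmap_read_lock(&init_mm);
 *	err = walk_kernel_page_table_range(VMALLOC_START, VMALLOC_END,
 *					   &dump_ops, NULL, NULL);
 *	mmap_read_unlock(&init_mm);
 */
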
/**
 * walk_page_range_debug - walk a range of pagetables not backed by a vma
 * @mm: mm_struct representing the target process of page table walk
 * @start: start address of the virtual address range
 * @end: end address of the virtual address range
 * @ops: operation to call during the walk
 * @pgd: pgd to walk if different from mm->pgd
 * @private: private data for callbacks' usage
 *
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback.
 *
 * This is for debugging purposes ONLY.
 */
int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd, void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	/* For convenience, we allow traversal of kernel mappings. */
	if (mm == &init_mm)
		return walk_kernel_page_table_range(start, end, ops,
						    pgd, private);
	if (start >= end || !walk.mm)
		return -EINVAL;
	if (!check_ops_safe(ops))
		return -EINVAL;

	/*
	 * The mmap lock protects the page walker from changes to the page
	 * tables during the walk. However, a read lock is insufficient to
	 * protect those areas which don't have a VMA, as munmap() detaches
	 * the VMAs before downgrading to a read lock and actually tearing
	 * down PTEs/page tables, in which case the mmap write lock must
	 * be held.
	 */
	mmap_assert_write_locked(mm);

	return walk_pgd_range(start, end, &walk);
}

int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end, const struct mm_walk_ops *ops,
			       void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	if (start >= end || !walk.mm)
		return -EINVAL;
	if (start < vma->vm_start || end > vma->vm_end)
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);
	process_vma_walk_lock(vma, ops->walk_lock);
	return __walk_page_range(start, end, &walk);
}

int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, const struct mm_walk_ops *ops,
			void *private)
{
	if (!check_ops_safe(ops))
		return -EINVAL;

	return walk_page_range_vma_unsafe(vma, start, end, ops, private);
}

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	if (!walk.mm)
		return -EINVAL;
	if (!check_ops_safe(ops))
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);
	process_vma_walk_lock(vma, ops->walk_lock);
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops: operation to call during the walk
 * @private: private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	if (!check_ops_safe(ops))
		return -EINVAL;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}
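
/*
 * Example (an illustrative sketch, reusing the hypothetical count_ops walker
 * sketched earlier): walking every mapping of the first @nr pages of a file,
 * with i_mmap_rwsem taken in read mode as required above:
 *
 *	i_mmap_lock_read(mapping);
 *	err = walk_page_mapping(mapping, 0, nr, &count_ops, &nr_present);
 *	i_mmap_unlock_read(mapping);
 */
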
/**
 * folio_walk_start - walk the page tables to a folio
 * @fw: filled with information on success.
 * @vma: the VMA.
 * @addr: the virtual address to use for the page table walk.
 * @flags: flags modifying which folios to walk to.
 *
 * Walk the page tables using @addr in a given @vma to a mapped folio and
 * return the folio, making sure that the page table entry referenced by
 * @addr cannot change until folio_walk_end() is called.
 *
 * By default, this function returns only folios that are not special (e.g.,
 * not the zeropage) and never returns folios that are supposed to be ignored
 * by the VM as documented by vm_normal_page(). If requested, zeropages will
 * be returned as well.
 *
 * By default, this function only considers present page table entries.
 * If requested, it will also consider migration entries.
 *
 * If this function returns NULL it might either indicate "there is nothing" or
 * "there is nothing suitable".
 *
 * On success, @fw is filled and the function returns the folio while the PTL
 * is still held and folio_walk_end() must be called to clean up,
 * releasing any held locks. The returned folio must *not* be used after the
 * call to folio_walk_end(), unless a short-term folio reference is taken before
 * that call.
 *
 * @fw->page will correspond to the page that is effectively referenced by
 * @addr. However, for migration entries and shared zeropages @fw->page is
 * set to NULL. Note that large folios might be mapped by multiple page table
 * entries, and this function will always only lookup a single entry as
 * specified by @addr, which might or might not cover more than a single page of
 * the returned folio.
 *
 * This function must *not* be used as a naive replacement for
 * get_user_pages() / pin_user_pages(), especially not to perform DMA or
 * to carelessly modify page content. This function may *only* be used to grab
 * short-term folio references, never to grab long-term folio references.
 *
 * Using the page table entry pointers in @fw for reading or modifying the
 * entry should be avoided where possible: however, there might be valid
 * use cases.
 *
 * WARNING: Modifying page table entries in hugetlb VMAs requires a lot of care.
 * For example, PMD page table sharing might require prior unsharing. Also,
 * logical hugetlb entries might span multiple physical page table entries,
 * which *must* be modified in a single operation (set_huge_pte_at(),
 * huge_ptep_set_*, ...). Note that the page table entry stored in @fw might
 * not correspond to the first physical entry of a logical hugetlb entry.
 *
 * The mmap lock must be held in read mode.
 *
 * Return: folio pointer on success, otherwise NULL.
 */
struct folio *folio_walk_start(struct folio_walk *fw,
			       struct vm_area_struct *vma, unsigned long addr,
			       folio_walk_flags_t flags)
{
	unsigned long entry_size;
	bool expose_page = true;
	struct page *page;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	pgd_t *pgdp;
	p4d_t *p4dp;

	mmap_assert_locked(vma->vm_mm);
	vma_pgtable_walk_begin(vma);

	if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))
		goto not_found;

	pgdp = pgd_offset(vma->vm_mm, addr);
	if (pgd_none_or_clear_bad(pgdp))
		goto not_found;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none_or_clear_bad(p4dp))
		goto not_found;

	pudp = pud_offset(p4dp, addr);
	pud = pudp_get(pudp);
	if (pud_none(pud))
		goto not_found;
	if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) &&
	    (!pud_present(pud) || pud_leaf(pud))) {
		ptl = pud_lock(vma->vm_mm, pudp);
		pud = pudp_get(pudp);

		entry_size = PUD_SIZE;
		fw->level = FW_LEVEL_PUD;
		fw->pudp = pudp;
		fw->pud = pud;

		if (pud_none(pud)) {
			spin_unlock(ptl);
			goto not_found;
		} else if (pud_present(pud) && !pud_leaf(pud)) {
			spin_unlock(ptl);
			goto pmd_table;
		} else if (pud_present(pud)) {
			page = vm_normal_page_pud(vma, addr, pud);
			if (page)
				goto found;
		}
		/*
		 * TODO: FW_MIGRATION support for PUD migration entries
		 * once there are relevant users.
		 */
		spin_unlock(ptl);
		goto not_found;
	}

pmd_table:
	VM_WARN_ON_ONCE(!pud_present(pud) || pud_leaf(pud));
	pmdp = pmd_offset(pudp, addr);
	pmd = pmdp_get_lockless(pmdp);
	if (pmd_none(pmd))
		goto not_found;
	if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) &&
	    (!pmd_present(pmd) || pmd_leaf(pmd))) {
		ptl = pmd_lock(vma->vm_mm, pmdp);
		pmd = pmdp_get(pmdp);

		entry_size = PMD_SIZE;
		fw->level = FW_LEVEL_PMD;
		fw->pmdp = pmdp;
		fw->pmd = pmd;

		if (pmd_none(pmd)) {
			spin_unlock(ptl);
			goto not_found;
		} else if (pmd_present(pmd) && !pmd_leaf(pmd)) {
			spin_unlock(ptl);
			goto pte_table;
		} else if (pmd_present(pmd)) {
			page = vm_normal_page_pmd(vma, addr, pmd);
			if (page) {
				goto found;
			} else if ((flags & FW_ZEROPAGE) &&
				   is_huge_zero_pmd(pmd)) {
				page = pfn_to_page(pmd_pfn(pmd));
				expose_page = false;
				goto found;
			}
		} else if ((flags & FW_MIGRATION) &&
			   pmd_is_migration_entry(pmd)) {
			const softleaf_t entry = softleaf_from_pmd(pmd);

			page = softleaf_to_page(entry);
			expose_page = false;
			goto found;
		}
		spin_unlock(ptl);
		goto not_found;
	}

pte_table:
	VM_WARN_ON_ONCE(!pmd_present(pmd) || pmd_leaf(pmd));
	ptep = pte_offset_map_lock(vma->vm_mm, pmdp, addr, &ptl);
	if (!ptep)
		goto not_found;
	pte = ptep_get(ptep);

	entry_size = PAGE_SIZE;
	fw->level = FW_LEVEL_PTE;
	fw->ptep = ptep;
	fw->pte = pte;

	if (pte_present(pte)) {
		page = vm_normal_page(vma, addr, pte);
		if (page)
			goto found;
		if ((flags & FW_ZEROPAGE) &&
		    is_zero_pfn(pte_pfn(pte))) {
			page = pfn_to_page(pte_pfn(pte));
			expose_page = false;
			goto found;
		}
	} else if (!pte_none(pte)) {
		const softleaf_t entry = softleaf_from_pte(pte);

		if ((flags & FW_MIGRATION) && softleaf_is_migration(entry)) {
			page = softleaf_to_page(entry);
			expose_page = false;
			goto found;
		}
	}
	pte_unmap_unlock(ptep, ptl);
not_found:
	vma_pgtable_walk_end(vma);
	return NULL;
found:
	if (expose_page)
		/* Note: Offset from the mapped page, not the folio start. */
		fw->page = page + ((addr & (entry_size - 1)) >> PAGE_SHIFT);
	else
		fw->page = NULL;
	fw->ptl = ptl;
	return page_folio(page);
}
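
/*
 * Example (an illustrative sketch): the folio_walk_start() / folio_walk_end()
 * pattern. The folio may only be used under the PTL unless a short-term
 * reference is taken before folio_walk_end():
 *
 *	struct folio_walk fw;
 *	struct folio *folio;
 *
 *	mmap_read_lock(mm);
 *	vma = vma_lookup(mm, addr);
 *	folio = vma ? folio_walk_start(&fw, vma, addr, 0) : NULL;
 *	if (folio) {
 *		folio_get(folio);
 *		folio_walk_end(&fw, vma);
 *		folio_put(folio);
 *	}
 *	mmap_read_unlock(mm);
 */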