// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);

/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_marker_uffd_wp(vmf->orig_pte);
}

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
	trace_rss_stat(mm, member);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE,
	 * (see pte_free_tlb()), flush the tlb if we need
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	do {
		unsigned long addr = vma->vm_start;
		struct vm_area_struct *next;

		/*
		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
		 * be 0.  This will underflow and is okay.
		 */
		next = mas_find(mas, ceiling - 1);

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		if (mm_wr_locked)
			vma_start_write(vma);
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = mas_find(mas, ceiling - 1);
				if (mm_wr_locked)
					vma_start_write(vma);
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	} while (vma);
}

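/*
 * Install a preallocated pte page table at @pmd if it is still empty.
 * On success the table is consumed and *pte is set to NULL; otherwise
 * the caller remains responsible for freeing it.
 */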
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		/*
		 * Ensure all pte setup (eg. pte page lock and page clearing) are
		 * visible before the pte is made visible to other CPUs by being
		 * put into page tables.
		 *
		 * The other side of the story is the pointer chasing in the page
		 * table walking code (when walking the page table without locking;
		 * ie. most of the time). Fortunately, these data accesses consist
		 * of a chain of data-dependent loads, meaning most CPUs (alpha
		 * being the notable exception) will already guarantee loads are
		 * seen in-order. See the alpha page table accessors for the
		 * smp_rmb() barriers in page table walking code.
		 */
		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
		pmd_populate(mm, pmd, *pte);
		*pte = NULL;
	}
	spin_unlock(ptl);
}

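/*
 * Slow path of pte_alloc(): allocate a new pte page table and try to
 * install it at @pmd.  If another thread raced and installed one first,
 * the freshly allocated table is freed again.
 */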
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		smp_wmb(); /* See comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->read_folio : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			/*
			 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
			 * and will have refcounts incremented on their struct pages
			 * when they are inserted into PTEs, thus they are safe to
			 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
			 * do not have refcounts. Example of legacy ZONE_DEVICE is
			 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
			 */
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

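/*
 * Folio variant of vm_normal_page(): returns the folio of a normal page,
 * or NULL for special mappings.
 */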
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	if (page)
		return page_folio(page);
	return NULL;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/*
	 * There is no pmd_special() but there may be special pmds, e.g.
	 * in a direct-access (dax) mapping, so let's just replicate the
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
	 */
	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
				  unsigned long addr, pmd_t pmd)
{
	struct page *page = vm_normal_page_pmd(vma, addr, pmd);

	if (page)
		return page_folio(page);
	return NULL;
}
#endif

static void restore_exclusive_pte(struct vm_area_struct *vma,
				  struct page *page, unsigned long address,
				  pte_t *ptep)
{
	pte_t orig_pte;
	pte_t pte;
	swp_entry_t entry;

	orig_pte = ptep_get(ptep);
	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(orig_pte))
		pte = pte_mksoft_dirty(pte);

	entry = pte_to_swp_entry(orig_pte);
	if (pte_swp_uffd_wp(orig_pte))
		pte = pte_mkuffd_wp(pte);
	else if (is_writable_device_exclusive_entry(entry))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));

	/*
	 * No need to take a page reference as one was already
	 * created when the swap entry was made.
	 */
	if (PageAnon(page))
		page_add_anon_rmap(page, vma, address, RMAP_NONE);
	else
		/*
		 * Currently device exclusive access only supports anonymous
		 * memory so the entry shouldn't point to a filebacked page.
		 */
		WARN_ON_ONCE(1);

	set_pte_at(vma->vm_mm, address, ptep, pte);

	/*
	 * No need to invalidate - it was non-present before. However
	 * secondary CPUs may have mappings that need invalidating.
	 */
	update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
			  unsigned long addr)
{
	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
	struct page *page = pfn_swap_entry_to_page(entry);

	if (trylock_page(page)) {
		restore_exclusive_pte(vma, page, addr, src_pte);
		unlock_page(page);
		return 0;
	}

	return -EBUSY;
}

/*
 * copy one vm_area from one task to the other. Assumes that the page
 * table entries in the new task are already clear in the whole range
 * covered by this vma.
 */

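/*
 * Handle a non-present source pte (swap, migration, device private/exclusive
 * or pte marker entry) while copying page tables at fork.  Returns 0 on
 * success, -EIO when a swap-count continuation must be allocated, -EBUSY when
 * a device-exclusive entry could not be restored without sleeping, and
 * -ENOENT when the entry was made present and should be copied as a present
 * pte instead.
 */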
static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	unsigned long vm_flags = dst_vma->vm_flags;
	pte_t orig_pte = ptep_get(src_pte);
	pte_t pte = orig_pte;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		/* Mark the swap entry as shared. */
		if (pte_swp_exclusive(orig_pte)) {
			pte = pte_swp_clear_exclusive(orig_pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		rss[mm_counter(page)]++;

		if (!is_readable_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both parent and child
			 * to be set to read. A previously exclusive entry is
			 * now shared.
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(orig_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		get_page(page);
		rss[mm_counter(page)]++;
		/* Cannot fail as these pages cannot get pinned. */
		BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
		return -ENOENT;
	} else if (is_pte_marker_entry(entry)) {
		pte_marker marker = copy_pte_marker(entry, dst_vma);

		if (marker)
			set_pte_at(dst_mm, addr, dst_pte,
				   make_pte_marker(marker));
		return 0;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct folio **prealloc, struct page *page)
{
	struct folio *new_folio;
	pte_t pte;

	new_folio = *prealloc;
	if (!new_folio)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good!  Take it
	 * over and copy the page & arm it.
	 */
	*prealloc = NULL;
	copy_user_highpage(&new_folio->page, page, addr, src_vma);
	__folio_mark_uptodate(new_folio);
	folio_add_new_anon_rmap(new_folio, dst_vma, addr);
	folio_add_lru_vma(new_folio, dst_vma);
	rss[MM_ANONPAGES]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_mkuffd_wp(pte);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 * is required to copy this pte.
 */
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		 struct folio **prealloc)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	unsigned long vm_flags = src_vma->vm_flags;
	pte_t pte = ptep_get(src_pte);
	struct page *page;
	struct folio *folio;

	page = vm_normal_page(src_vma, addr, pte);
	if (page)
		folio = page_folio(page);
	if (page && folio_test_anon(folio)) {
		/*
		 * If this page may have been pinned by the parent process,
		 * copy the page immediately for the child so that we'll always
		 * guarantee the pinned page won't be randomly replaced in the
		 * future.
		 */
		folio_get(folio);
		if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
			/* Page may be pinned, we have to copy. */
			folio_put(folio);
			return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
						 addr, rss, prealloc, page);
		}
		rss[MM_ANONPAGES]++;
	} else if (page) {
		folio_get(folio);
		page_dup_file_rmap(page, false);
		rss[mm_counter_file(page)]++;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}
	VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

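/*
 * Preallocate and charge a folio for copy_present_page(), allocated
 * according to @vma and @addr so the usual mempolicy placement is
 * respected.  Returns NULL if the allocation or the memcg charge fails.
 */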
static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *new_folio;

	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
	if (!new_folio)
		return NULL;

	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		return NULL;
	}
	folio_throttle_swaprate(new_folio, GFP_KERNEL);

	return new_folio;
}

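/*
 * Copy one pte-level page table from the parent to the child at fork.
 * May drop the page-table locks to allocate memory (a preallocated page
 * for a possibly pinned page, or a swap count continuation) and then
 * retry the remaining range.
 */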
static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	pte_t ptent;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct folio *prealloc = NULL;

again:
	progress = 0;
	init_rss_vec(rss);

	/*
	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
	 * error handling here, assume that exclusive mmap_lock on dst and src
	 * protects anon from unexpected THP transitions; with shmem and file
	 * protected by mmap_lock-less collapse skipping areas with anon_vma
	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
	 * can remove such assumptions later, but this is good enough for now.
	 */
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}
	src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);
	if (!src_pte) {
		pte_unmap_unlock(dst_pte, dst_ptl);
		/* ret == 0 */
		goto out;
	}
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		ptent = ptep_get(src_pte);
		if (pte_none(ptent)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(ptent))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
						  dst_pte, src_pte,
						  dst_vma, src_vma,
						  addr, rss);
			if (ret == -EIO) {
				entry = pte_to_swp_entry(ptep_get(src_pte));
				break;
			} else if (ret == -EBUSY) {
				break;
			} else if (!ret) {
				progress += 8;
				continue;
			}

			/*
			 * Device exclusive entry restored, continue by copying
			 * the now present pte.
			 */
			WARN_ON_ONCE(ret != -ENOENT);
		}
		/* copy_present_pte() will clear `*prealloc' if consumed */
		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
				       addr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 */
		if (unlikely(ret == -EAGAIN))
			break;
		if (unlikely(prealloc)) {
			/*
			 * pre-alloc page cannot be reused by next time so as
			 * to strictly follow mempolicy (e.g., alloc_page_vma()
			 * will allocate page according to address).  This
			 * could only happen if one pinned pte changed.
			 */
			folio_put(prealloc);
			prealloc = NULL;
		}
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_src_pte, src_ptl);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret == -EBUSY) {
		goto out;
	} else if (ret == -EAGAIN) {
		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
		if (!prealloc)
			return -ENOMEM;
	} else if (ret) {
		VM_WARN_ON_ONCE(1);
	}

	/* We've captured and resolved the error. Reset, try again. */
	ret = 0;

	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		folio_put(prealloc);
	return ret;
}

static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
					    addr, dst_vma, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork().  Return
 * false when we can speed up fork() by allowing lazy page faults later until
 * when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	/*
	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, the
	 * pgtable contains uffd-wp protection information that we can't
	 * retrieve from the page cache, and skipping the copy would lose it.
	 */
	if (userfaultfd_wp(dst_vma))
		return true;

	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
		return true;

	if (src_vma->anon_vma)
		return true;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.  Fork
	 * becomes much lighter when there are big shared or private readonly
	 * mappings. The tradeoff is that copy_page_range is more efficient
	 * than faulting.
	 */
	return false;
}

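/*
 * Copy the page tables of @src_vma into @dst_vma at fork time, unless
 * vma_needs_copy() says the range can simply be refilled by later page
 * faults in the child.
 */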
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	bool is_cow;
	int ret;

	if (!vma_needs_copy(dst_vma, src_vma))
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_copy(src_vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * Disabling preemption is not needed for the write side, as
		 * the read side doesn't spin, but goes to the mmap_lock.
		 *
		 * Use the raw variant of the seqcount_t write API to avoid
		 * lockdep complaining about preemptibility.
		 */
		vma_assert_write_locked(src_vma);
		raw_write_seqcount_begin(&src_mm->write_protect_seq);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			untrack_pfn_clear(dst_vma);
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow) {
		raw_write_seqcount_end(&src_mm->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	}
	return ret;
}

/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
	/* By default, zap all pages */
	if (!details)
		return true;

	/* Or, we zap COWed pages only if the caller wants to */
	return details->even_cows;
}

/* Decides whether we should zap this page with the page pointer specified */
static inline bool should_zap_page(struct zap_details *details, struct page *page)
{
	/* If we can make a decision without *page.. */
	if (should_zap_cows(details))
		return true;

	/* E.g. the caller passes NULL for the case of a zero page */
	if (!page)
		return true;

	/* Otherwise we should only zap non-anon pages */
	return !PageAnon(page);
}

static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
{
	if (!details)
		return false;

	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}

/*
 * This function makes sure that we'll replace the none pte with an uffd-wp
 * swap special pte marker when necessary. Must be called with the pgtable
 * lock held.
 */
static inline void
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte,
			      struct zap_details *details, pte_t pteval)
{
	/* Zap on anonymous always means dropping everything */
	if (vma_is_anonymous(vma))
		return;

	if (zap_drop_file_uffd_wp(details))
		return;

	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
}

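/*
 * Unmap the user pages mapped by one pte-level page table in [addr, end),
 * updating the rss counters and batching page frees and TLB flushes in
 * @tlb.  Returns the address it stopped at, which may be before @end when
 * a rescheduling point or a full mmu_gather batch forces an early exit.
 */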
static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	int force_flush = 0;
	int rss[NR_MM_COUNTERS];
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;
	swp_entry_t entry;

	tlb_change_page_size(tlb, PAGE_SIZE);
	init_rss_vec(rss);
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return addr;

	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = ptep_get(pte);
		struct page *page;

		if (pte_none(ptent))
			continue;

		if (need_resched())
			break;

		if (pte_present(ptent)) {
			unsigned int delay_rmap;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(!should_zap_page(details, page)))
				continue;
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			arch_check_zapped_pte(vma, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
			zap_install_uffd_wp_if_needed(vma, addr, pte, details,
						      ptent);
			if (unlikely(!page)) {
				ksm_might_unmap_zero_page(mm, ptent);
				continue;
			}

			delay_rmap = 0;
			if (!PageAnon(page)) {
				if (pte_dirty(ptent)) {
					set_page_dirty(page);
					if (tlb_delay_rmap(tlb)) {
						delay_rmap = 1;
						force_flush = 1;
					}
				}
				if (pte_young(ptent) && likely(vma_has_recency(vma)))
					mark_page_accessed(page);
			}
			rss[mm_counter(page)]--;
			if (!delay_rmap) {
				page_remove_rmap(page, vma, false);
				if (unlikely(page_mapcount(page) < 0))
					print_bad_pte(vma, addr, ptent, page);
			}
			if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
				force_flush = 1;
				addr += PAGE_SIZE;
				break;
			}
			continue;
		}

		entry = pte_to_swp_entry(ptent);
		if (is_device_private_entry(entry) ||
		    is_device_exclusive_entry(entry)) {
			page = pfn_swap_entry_to_page(entry);
			if (unlikely(!should_zap_page(details, page)))
				continue;
			/*
			 * Both device private/exclusive mappings should only
			 * work with anonymous page so far, so we don't need to
			 * consider uffd-wp bit when zap. For more information,
			 * see zap_install_uffd_wp_if_needed().
			 */
			WARN_ON_ONCE(!vma_is_anonymous(vma));
			rss[mm_counter(page)]--;
			if (is_device_private_entry(entry))
				page_remove_rmap(page, vma, false);
			put_page(page);
		} else if (!non_swap_entry(entry)) {
			/* Genuine swap entry, hence a private anon page */
			if (!should_zap_cows(details))
				continue;
			rss[MM_SWAPENTS]--;
			if (unlikely(!free_swap_and_cache(entry)))
				print_bad_pte(vma, addr, ptent, NULL);
		} else if (is_migration_entry(entry)) {
			page = pfn_swap_entry_to_page(entry);
			if (!should_zap_page(details, page))
				continue;
			rss[mm_counter(page)]--;
		} else if (pte_marker_entry_uffd_wp(entry)) {
			/*
			 * For anon: always drop the marker; for file: only
			 * drop the marker if explicitly requested.
			 */
			if (!vma_is_anonymous(vma) &&
			    !zap_drop_file_uffd_wp(details))
				continue;
		} else if (is_hwpoison_entry(entry) ||
			   is_poisoned_swp_entry(entry)) {
			if (!should_zap_cows(details))
				continue;
		} else {
			/* We should have covered all the swap entry types */
			pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
			WARN_ON_ONCE(1);
		}
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
		zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
	} while (pte++, addr += PAGE_SIZE, addr != end);

	add_mm_rss_vec(mm, rss);
	arch_leave_lazy_mmu_mode();

	/* Do the actual TLB flush before dropping ptl */
	if (force_flush) {
		tlb_flush_mmu_tlbonly(tlb);
		tlb_flush_rmaps(tlb, vma);
	}
	pte_unmap_unlock(start_pte, ptl);

	/*
	 * If we forced a TLB flush (either due to running out of
	 * batch buffers or because we needed to flush dirty TLB
	 * entries before releasing the ptl), free the batched
	 * memory too. Come back again if we didn't do everything.
	 */
	if (force_flush)
		tlb_flush_mmu(tlb);

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
				addr = next;
				continue;
			}
			/* fall through */
		} else if (details && details->single_folio &&
			   folio_test_pmd_mappable(details->single_folio) &&
			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
			/*
			 * Take and drop THP pmd lock so that we cannot return
			 * prematurely, while zap_huge_pmd() has cleared *pmd,
			 * but not yet decremented compound_mapcount().
			 */
			spin_unlock(ptl);
		}
		if (pmd_none(*pmd)) {
			addr = next;
			continue;
		}
		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
		if (addr != next)
			pmd--;
	} while (pmd++, cond_resched(), addr != end);

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
			if (next - addr != HPAGE_PUD_SIZE) {
				mmap_assert_locked(tlb->mm);
				split_huge_pud(vma, pud, addr);
			} else if (zap_huge_pud(tlb, vma, pud, addr))
				goto next;
			/* fall through */
		}
		if (pud_none_or_clear_bad(pud))
			continue;
		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
next:
		cond_resched();
	} while (pud++, addr = next, addr != end);

	return addr;
}

static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
	} while (p4d++, addr = next, addr != end);

	return addr;
}

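/*
 * Unmap all pages of @vma in [addr, end), walking the page-table tree
 * from the pgd level downwards.
 */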
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
}

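/*
 * Unmap the part of @vma that overlaps [start_addr, end_addr), taking care
 * of uprobes, PFNMAP tracking and hugetlb vmas along the way.
 */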
static void unmap_single_vma(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr,
		struct zap_details *details, bool mm_wr_locked)
{
	unsigned long start = max(vma->vm_start, start_addr);
	unsigned long end;

	if (start >= vma->vm_end)
		return;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return;

	if (vma->vm_file)
		uprobe_munmap(vma, start, end);

	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn(vma, 0, 0, mm_wr_locked);

	if (start != end) {
		if (unlikely(is_vm_hugetlb_page(vma))) {
			/*
			 * It is undesirable to test vma->vm_file as it
			 * should be non-null for valid hugetlb area.
			 * However, vm_file will be NULL in the error
			 * cleanup path of mmap_region. When
			 * hugetlbfs ->mmap method fails,
			 * mmap_region() nullifies vma->vm_file
			 * before calling this function to clean up.
			 * Since no pte has actually been setup, it is
			 * safe to do nothing in this case.
			 */
			if (vma->vm_file) {
				zap_flags_t zap_flags = details ?
					details->zap_flags : 0;
				__unmap_hugepage_range(tlb, vma, start, end,
							  NULL, zap_flags);
			}
		} else
			unmap_page_range(tlb, vma, start, end, details);
	}
}

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlb: address of the caller's struct mmu_gather
 * @mas: the maple state
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @tree_end: The maximum index to check
 * @mm_wr_locked: lock flag
 *
 * Unmap all pages in the vma list.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long tree_end,
		bool mm_wr_locked)
{
	struct mmu_notifier_range range;
	struct zap_details details = {
		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
		/* Careful - we need to zap private pages too! */
		.even_cows = true,
	};

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
				start_addr, end_addr);
	mmu_notifier_invalidate_range_start(&range);
	do {
		unsigned long start = start_addr;
		unsigned long end = end_addr;
		hugetlb_zap_begin(vma, &start, &end);
		unmap_single_vma(tlb, vma, start, end, &details,
				 mm_wr_locked);
		hugetlb_zap_end(vma, &details);
	} while ((vma = mas_find(mas, tree_end - 1)) != NULL);
	mmu_notifier_invalidate_range_end(&range);
}

/**
 * zap_page_range_single - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of shared cache invalidation
 *
 * The range must fit into one VMA.
 */
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	const unsigned long end = address + size;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	lru_add_drain();
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, end);
	hugetlb_zap_begin(vma, &range.start, &range.end);
	tlb_gather_mmu(&tlb, vma->vm_mm);
	update_hiwater_rss(vma->vm_mm);
	mmu_notifier_invalidate_range_start(&range);
	/*
	 * unmap 'address-end' not 'range.start-range.end' as range
	 * could have been expanded for hugetlb pmd sharing.
	 */
	unmap_single_vma(&tlb, vma, address, end, details, false);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
	hugetlb_zap_end(vma, details);
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 */
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (!range_in_vma(vma, address, address + size) ||
	    !(vma->vm_flags & VM_PFNMAP))
		return;

	zap_page_range_single(vma, address, size, NULL);
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);

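/*
 * Walk down to the pmd covering @addr, allocating the intermediate p4d,
 * pud and pmd levels as needed.  Returns NULL if an allocation fails.
 */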
static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));
	return pmd;
}

pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pmd_t *pmd = walk_to_pmd(mm, addr);

	if (!pmd)
		return NULL;
	return pte_alloc_map_lock(mm, pmd, addr, ptl);
}

static int validate_page_before_insert(struct page *page)
{
	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
		return -EINVAL;
	flush_dcache_page(page);
	return 0;
}

static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
			unsigned long addr, struct page *page, pgprot_t prot)
{
	if (!pte_none(ptep_get(pte)))
		return -EBUSY;
	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
	page_add_file_rmap(page, vma, false);
	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
	return 0;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = validate_page_before_insert(page);
	if (retval)
		goto out;
	retval = -ENOMEM;
	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

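/*
 * Like insert_page(), but intended for batched insertion via insert_pages():
 * the caller already holds the page-table lock for @pte.
 */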
1901 */ 1902 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, 1903 struct page **pages, unsigned long *num, pgprot_t prot) 1904 { 1905 pmd_t *pmd = NULL; 1906 pte_t *start_pte, *pte; 1907 spinlock_t *pte_lock; 1908 struct mm_struct *const mm = vma->vm_mm; 1909 unsigned long curr_page_idx = 0; 1910 unsigned long remaining_pages_total = *num; 1911 unsigned long pages_to_write_in_pmd; 1912 int ret; 1913 more: 1914 ret = -EFAULT; 1915 pmd = walk_to_pmd(mm, addr); 1916 if (!pmd) 1917 goto out; 1918 1919 pages_to_write_in_pmd = min_t(unsigned long, 1920 remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); 1921 1922 /* Allocate the PTE if necessary; takes PMD lock once only. */ 1923 ret = -ENOMEM; 1924 if (pte_alloc(mm, pmd)) 1925 goto out; 1926 1927 while (pages_to_write_in_pmd) { 1928 int pte_idx = 0; 1929 const int batch_size = min_t(int, pages_to_write_in_pmd, 8); 1930 1931 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); 1932 if (!start_pte) { 1933 ret = -EFAULT; 1934 goto out; 1935 } 1936 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { 1937 int err = insert_page_in_batch_locked(vma, pte, 1938 addr, pages[curr_page_idx], prot); 1939 if (unlikely(err)) { 1940 pte_unmap_unlock(start_pte, pte_lock); 1941 ret = err; 1942 remaining_pages_total -= pte_idx; 1943 goto out; 1944 } 1945 addr += PAGE_SIZE; 1946 ++curr_page_idx; 1947 } 1948 pte_unmap_unlock(start_pte, pte_lock); 1949 pages_to_write_in_pmd -= batch_size; 1950 remaining_pages_total -= batch_size; 1951 } 1952 if (remaining_pages_total) 1953 goto more; 1954 ret = 0; 1955 out: 1956 *num = remaining_pages_total; 1957 return ret; 1958 } 1959 1960 /** 1961 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. 1962 * @vma: user vma to map to 1963 * @addr: target start user address of these pages 1964 * @pages: source kernel pages 1965 * @num: in: number of pages to map. out: number of pages that were *not* 1966 * mapped. (0 means all pages were successfully mapped). 1967 * 1968 * Preferred over vm_insert_page() when inserting multiple pages. 1969 * 1970 * In case of error, we may have mapped a subset of the provided 1971 * pages. It is the caller's responsibility to account for this case. 1972 * 1973 * The same restrictions apply as in vm_insert_page(). 1974 */ 1975 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 1976 struct page **pages, unsigned long *num) 1977 { 1978 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; 1979 1980 if (addr < vma->vm_start || end_addr >= vma->vm_end) 1981 return -EFAULT; 1982 if (!(vma->vm_flags & VM_MIXEDMAP)) { 1983 BUG_ON(mmap_read_trylock(vma->vm_mm)); 1984 BUG_ON(vma->vm_flags & VM_PFNMAP); 1985 vm_flags_set(vma, VM_MIXEDMAP); 1986 } 1987 /* Defer page refcount checking till we're about to map that page. */ 1988 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); 1989 } 1990 EXPORT_SYMBOL(vm_insert_pages); 1991 1992 /** 1993 * vm_insert_page - insert single page into user vma 1994 * @vma: user vma to map to 1995 * @addr: target user address of this page 1996 * @page: source kernel page 1997 * 1998 * This allows drivers to insert individual pages they've allocated 1999 * into a user vma. 2000 * 2001 * The page has to be a nice clean _individual_ kernel allocation. 2002 * If you allocate a compound page, you need to have marked it as 2003 * such (__GFP_COMP), or manually just split the page up yourself 2004 * (see split_page()). 2005 * 2006 * NOTE! 
Traditionally this was done with "remap_pfn_range()" which 2007 * took an arbitrary page protection parameter. This doesn't allow 2008 * that. Your vma protection will have to be set up correctly, which 2009 * means that if you want a shared writable mapping, you'd better 2010 * ask for a shared writable mapping! 2011 * 2012 * The page does not need to be reserved. 2013 * 2014 * Usually this function is called from f_op->mmap() handler 2015 * under mm->mmap_lock write-lock, so it can change vma->vm_flags. 2016 * Caller must set VM_MIXEDMAP on vma if it wants to call this 2017 * function from other places, for example from page-fault handler. 2018 * 2019 * Return: %0 on success, negative error code otherwise. 2020 */ 2021 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 2022 struct page *page) 2023 { 2024 if (addr < vma->vm_start || addr >= vma->vm_end) 2025 return -EFAULT; 2026 if (!page_count(page)) 2027 return -EINVAL; 2028 if (!(vma->vm_flags & VM_MIXEDMAP)) { 2029 BUG_ON(mmap_read_trylock(vma->vm_mm)); 2030 BUG_ON(vma->vm_flags & VM_PFNMAP); 2031 vm_flags_set(vma, VM_MIXEDMAP); 2032 } 2033 return insert_page(vma, addr, page, vma->vm_page_prot); 2034 } 2035 EXPORT_SYMBOL(vm_insert_page); 2036 2037 /* 2038 * __vm_map_pages - maps range of kernel pages into user vma 2039 * @vma: user vma to map to 2040 * @pages: pointer to array of source kernel pages 2041 * @num: number of pages in page array 2042 * @offset: user's requested vm_pgoff 2043 * 2044 * This allows drivers to map range of kernel pages into a user vma. 2045 * 2046 * Return: 0 on success and error code otherwise. 2047 */ 2048 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2049 unsigned long num, unsigned long offset) 2050 { 2051 unsigned long count = vma_pages(vma); 2052 unsigned long uaddr = vma->vm_start; 2053 int ret, i; 2054 2055 /* Fail if the user requested offset is beyond the end of the object */ 2056 if (offset >= num) 2057 return -ENXIO; 2058 2059 /* Fail if the user requested size exceeds available object size */ 2060 if (count > num - offset) 2061 return -ENXIO; 2062 2063 for (i = 0; i < count; i++) { 2064 ret = vm_insert_page(vma, uaddr, pages[offset + i]); 2065 if (ret < 0) 2066 return ret; 2067 uaddr += PAGE_SIZE; 2068 } 2069 2070 return 0; 2071 } 2072 2073 /** 2074 * vm_map_pages - maps range of kernel pages starts with non zero offset 2075 * @vma: user vma to map to 2076 * @pages: pointer to array of source kernel pages 2077 * @num: number of pages in page array 2078 * 2079 * Maps an object consisting of @num pages, catering for the user's 2080 * requested vm_pgoff 2081 * 2082 * If we fail to insert any page into the vma, the function will return 2083 * immediately leaving any previously inserted pages present. Callers 2084 * from the mmap handler may immediately return the error as their caller 2085 * will destroy the vma, removing any successfully inserted pages. Other 2086 * callers should make their own arrangements for calling unmap_region(). 2087 * 2088 * Context: Process context. Called by mmap handlers. 2089 * Return: 0 on success and error code otherwise. 
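 *
 * A minimal sketch of a ->mmap() handler using this (hypothetical driver
 * identifiers; buf->pages/buf->page_count describe a driver-owned buffer):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_buf *buf = file->private_data;
 *
 *		// vma->vm_pgoff selects where inside the buffer the
 *		// mapping starts; vm_map_pages() validates it for us.
 *		return vm_map_pages(vma, buf->pages, buf->page_count);
 *	}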
2090 */ 2091 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2092 unsigned long num) 2093 { 2094 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); 2095 } 2096 EXPORT_SYMBOL(vm_map_pages); 2097 2098 /** 2099 * vm_map_pages_zero - map range of kernel pages starts with zero offset 2100 * @vma: user vma to map to 2101 * @pages: pointer to array of source kernel pages 2102 * @num: number of pages in page array 2103 * 2104 * Similar to vm_map_pages(), except that it explicitly sets the offset 2105 * to 0. This function is intended for the drivers that did not consider 2106 * vm_pgoff. 2107 * 2108 * Context: Process context. Called by mmap handlers. 2109 * Return: 0 on success and error code otherwise. 2110 */ 2111 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, 2112 unsigned long num) 2113 { 2114 return __vm_map_pages(vma, pages, num, 0); 2115 } 2116 EXPORT_SYMBOL(vm_map_pages_zero); 2117 2118 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2119 pfn_t pfn, pgprot_t prot, bool mkwrite) 2120 { 2121 struct mm_struct *mm = vma->vm_mm; 2122 pte_t *pte, entry; 2123 spinlock_t *ptl; 2124 2125 pte = get_locked_pte(mm, addr, &ptl); 2126 if (!pte) 2127 return VM_FAULT_OOM; 2128 entry = ptep_get(pte); 2129 if (!pte_none(entry)) { 2130 if (mkwrite) { 2131 /* 2132 * For read faults on private mappings the PFN passed 2133 * in may not match the PFN we have mapped if the 2134 * mapped PFN is a writeable COW page. In the mkwrite 2135 * case we are creating a writable PTE for a shared 2136 * mapping and we expect the PFNs to match. If they 2137 * don't match, we are likely racing with block 2138 * allocation and mapping invalidation so just skip the 2139 * update. 2140 */ 2141 if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) { 2142 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry))); 2143 goto out_unlock; 2144 } 2145 entry = pte_mkyoung(entry); 2146 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2147 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) 2148 update_mmu_cache(vma, addr, pte); 2149 } 2150 goto out_unlock; 2151 } 2152 2153 /* Ok, finally just insert the thing.. */ 2154 if (pfn_t_devmap(pfn)) 2155 entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); 2156 else 2157 entry = pte_mkspecial(pfn_t_pte(pfn, prot)); 2158 2159 if (mkwrite) { 2160 entry = pte_mkyoung(entry); 2161 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2162 } 2163 2164 set_pte_at(mm, addr, pte, entry); 2165 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ 2166 2167 out_unlock: 2168 pte_unmap_unlock(pte, ptl); 2169 return VM_FAULT_NOPAGE; 2170 } 2171 2172 /** 2173 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot 2174 * @vma: user vma to map to 2175 * @addr: target user address of this page 2176 * @pfn: source kernel pfn 2177 * @pgprot: pgprot flags for the inserted page 2178 * 2179 * This is exactly like vmf_insert_pfn(), except that it allows drivers 2180 * to override pgprot on a per-page basis. 2181 * 2182 * This only makes sense for IO mappings, and it makes no sense for 2183 * COW mappings. In general, using multiple vmas is preferable; 2184 * vmf_insert_pfn_prot should only be used if using multiple VMAs is 2185 * impractical. 2186 * 2187 * pgprot typically only differs from @vma->vm_page_prot when drivers set 2188 * caching- and encryption bits different than those of @vma->vm_page_prot, 2189 * because the caching- or encryption mode may not be known at mmap() time. 
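 *
 * For example, a fault handler that must map one pfn write-combined while
 * the rest of the vma keeps its default protection might do (sketch only,
 * hypothetical identifiers; mydrv_pfn_for() stands in for the driver's
 * offset-to-pfn lookup):
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		unsigned long pfn = mydrv_pfn_for(vmf->pgoff);
 *
 *		return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
 *				pgprot_writecombine(vmf->vma->vm_page_prot));
 *	}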
2190 * 2191 * This is ok as long as @vma->vm_page_prot is not used by the core vm 2192 * to set caching and encryption bits for those vmas (except for COW pages). 2193 * This is ensured by core vm only modifying these page table entries using 2194 * functions that don't touch caching- or encryption bits, using pte_modify() 2195 * if needed. (See for example mprotect()). 2196 * 2197 * Also when new page-table entries are created, this is only done using the 2198 * fault() callback, and never using the value of vma->vm_page_prot, 2199 * except for page-table entries that point to anonymous pages as the result 2200 * of COW. 2201 * 2202 * Context: Process context. May allocate using %GFP_KERNEL. 2203 * Return: vm_fault_t value. 2204 */ 2205 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2206 unsigned long pfn, pgprot_t pgprot) 2207 { 2208 /* 2209 * Technically, architectures with pte_special can avoid all these 2210 * restrictions (same for remap_pfn_range). However we would like 2211 * consistency in testing and feature parity among all, so we should 2212 * try to keep these invariants in place for everybody. 2213 */ 2214 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 2215 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 2216 (VM_PFNMAP|VM_MIXEDMAP)); 2217 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 2218 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 2219 2220 if (addr < vma->vm_start || addr >= vma->vm_end) 2221 return VM_FAULT_SIGBUS; 2222 2223 if (!pfn_modify_allowed(pfn, pgprot)) 2224 return VM_FAULT_SIGBUS; 2225 2226 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); 2227 2228 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, 2229 false); 2230 } 2231 EXPORT_SYMBOL(vmf_insert_pfn_prot); 2232 2233 /** 2234 * vmf_insert_pfn - insert single pfn into user vma 2235 * @vma: user vma to map to 2236 * @addr: target user address of this page 2237 * @pfn: source kernel pfn 2238 * 2239 * Similar to vm_insert_page, this allows drivers to insert individual pages 2240 * they've allocated into a user vma. Same comments apply. 2241 * 2242 * This function should only be called from a vm_ops->fault handler, and 2243 * in that case the handler should return the result of this function. 2244 * 2245 * vma cannot be a COW mapping. 2246 * 2247 * As this is called only for pages that do not currently exist, we 2248 * do not need to flush old virtual caches or the TLB. 2249 * 2250 * Context: Process context. May allocate using %GFP_KERNEL. 2251 * Return: vm_fault_t value. 
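 *
 * A minimal sketch of such a fault handler and its wiring (hypothetical
 * identifiers; mydrv_pfn_for() is assumed to translate the faulting offset
 * to a pfn):
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		return vmf_insert_pfn(vmf->vma, vmf->address,
 *				      mydrv_pfn_for(vmf->pgoff));
 *	}
 *
 *	static const struct vm_operations_struct mydrv_vm_ops = {
 *		.fault = mydrv_fault,
 *	};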
2252 */ 2253 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2254 unsigned long pfn) 2255 { 2256 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); 2257 } 2258 EXPORT_SYMBOL(vmf_insert_pfn); 2259 2260 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) 2261 { 2262 /* these checks mirror the abort conditions in vm_normal_page */ 2263 if (vma->vm_flags & VM_MIXEDMAP) 2264 return true; 2265 if (pfn_t_devmap(pfn)) 2266 return true; 2267 if (pfn_t_special(pfn)) 2268 return true; 2269 if (is_zero_pfn(pfn_t_to_pfn(pfn))) 2270 return true; 2271 return false; 2272 } 2273 2274 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, 2275 unsigned long addr, pfn_t pfn, bool mkwrite) 2276 { 2277 pgprot_t pgprot = vma->vm_page_prot; 2278 int err; 2279 2280 BUG_ON(!vm_mixed_ok(vma, pfn)); 2281 2282 if (addr < vma->vm_start || addr >= vma->vm_end) 2283 return VM_FAULT_SIGBUS; 2284 2285 track_pfn_insert(vma, &pgprot, pfn); 2286 2287 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) 2288 return VM_FAULT_SIGBUS; 2289 2290 /* 2291 * If we don't have pte special, then we have to use the pfn_valid() 2292 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 2293 * refcount the page if pfn_valid is true (hence insert_page rather 2294 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP 2295 * without pte special, it would there be refcounted as a normal page. 2296 */ 2297 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && 2298 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { 2299 struct page *page; 2300 2301 /* 2302 * At this point we are committed to insert_page() 2303 * regardless of whether the caller specified flags that 2304 * result in pfn_t_has_page() == false. 2305 */ 2306 page = pfn_to_page(pfn_t_to_pfn(pfn)); 2307 err = insert_page(vma, addr, page, pgprot); 2308 } else { 2309 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); 2310 } 2311 2312 if (err == -ENOMEM) 2313 return VM_FAULT_OOM; 2314 if (err < 0 && err != -EBUSY) 2315 return VM_FAULT_SIGBUS; 2316 2317 return VM_FAULT_NOPAGE; 2318 } 2319 2320 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2321 pfn_t pfn) 2322 { 2323 return __vm_insert_mixed(vma, addr, pfn, false); 2324 } 2325 EXPORT_SYMBOL(vmf_insert_mixed); 2326 2327 /* 2328 * If the insertion of PTE failed because someone else already added a 2329 * different entry in the mean time, we treat that as success as we assume 2330 * the same entry was actually inserted. 2331 */ 2332 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, 2333 unsigned long addr, pfn_t pfn) 2334 { 2335 return __vm_insert_mixed(vma, addr, pfn, true); 2336 } 2337 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite); 2338 2339 /* 2340 * maps a range of physical memory into the requested pages. the old 2341 * mappings are removed. 
any references to nonexistent pages results 2342 * in null mappings (currently treated as "copy-on-access") 2343 */ 2344 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, 2345 unsigned long addr, unsigned long end, 2346 unsigned long pfn, pgprot_t prot) 2347 { 2348 pte_t *pte, *mapped_pte; 2349 spinlock_t *ptl; 2350 int err = 0; 2351 2352 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 2353 if (!pte) 2354 return -ENOMEM; 2355 arch_enter_lazy_mmu_mode(); 2356 do { 2357 BUG_ON(!pte_none(ptep_get(pte))); 2358 if (!pfn_modify_allowed(pfn, prot)) { 2359 err = -EACCES; 2360 break; 2361 } 2362 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); 2363 pfn++; 2364 } while (pte++, addr += PAGE_SIZE, addr != end); 2365 arch_leave_lazy_mmu_mode(); 2366 pte_unmap_unlock(mapped_pte, ptl); 2367 return err; 2368 } 2369 2370 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, 2371 unsigned long addr, unsigned long end, 2372 unsigned long pfn, pgprot_t prot) 2373 { 2374 pmd_t *pmd; 2375 unsigned long next; 2376 int err; 2377 2378 pfn -= addr >> PAGE_SHIFT; 2379 pmd = pmd_alloc(mm, pud, addr); 2380 if (!pmd) 2381 return -ENOMEM; 2382 VM_BUG_ON(pmd_trans_huge(*pmd)); 2383 do { 2384 next = pmd_addr_end(addr, end); 2385 err = remap_pte_range(mm, pmd, addr, next, 2386 pfn + (addr >> PAGE_SHIFT), prot); 2387 if (err) 2388 return err; 2389 } while (pmd++, addr = next, addr != end); 2390 return 0; 2391 } 2392 2393 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, 2394 unsigned long addr, unsigned long end, 2395 unsigned long pfn, pgprot_t prot) 2396 { 2397 pud_t *pud; 2398 unsigned long next; 2399 int err; 2400 2401 pfn -= addr >> PAGE_SHIFT; 2402 pud = pud_alloc(mm, p4d, addr); 2403 if (!pud) 2404 return -ENOMEM; 2405 do { 2406 next = pud_addr_end(addr, end); 2407 err = remap_pmd_range(mm, pud, addr, next, 2408 pfn + (addr >> PAGE_SHIFT), prot); 2409 if (err) 2410 return err; 2411 } while (pud++, addr = next, addr != end); 2412 return 0; 2413 } 2414 2415 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2416 unsigned long addr, unsigned long end, 2417 unsigned long pfn, pgprot_t prot) 2418 { 2419 p4d_t *p4d; 2420 unsigned long next; 2421 int err; 2422 2423 pfn -= addr >> PAGE_SHIFT; 2424 p4d = p4d_alloc(mm, pgd, addr); 2425 if (!p4d) 2426 return -ENOMEM; 2427 do { 2428 next = p4d_addr_end(addr, end); 2429 err = remap_pud_range(mm, p4d, addr, next, 2430 pfn + (addr >> PAGE_SHIFT), prot); 2431 if (err) 2432 return err; 2433 } while (p4d++, addr = next, addr != end); 2434 return 0; 2435 } 2436 2437 /* 2438 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller 2439 * must have pre-validated the caching bits of the pgprot_t. 2440 */ 2441 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, 2442 unsigned long pfn, unsigned long size, pgprot_t prot) 2443 { 2444 pgd_t *pgd; 2445 unsigned long next; 2446 unsigned long end = addr + PAGE_ALIGN(size); 2447 struct mm_struct *mm = vma->vm_mm; 2448 int err; 2449 2450 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) 2451 return -EINVAL; 2452 2453 /* 2454 * Physically remapped pages are special. Tell the 2455 * rest of the world about it: 2456 * VM_IO tells people not to look at these pages 2457 * (accesses can have side effects). 2458 * VM_PFNMAP tells the core MM that the base pages are just 2459 * raw PFN mappings, and do not have a "struct page" associated 2460 * with them. 2461 * VM_DONTEXPAND 2462 * Disable vma merging and expanding with mremap(). 
2463 * VM_DONTDUMP 2464 * Omit vma from core dump, even when VM_IO turned off. 2465 * 2466 * There's a horrible special case to handle copy-on-write 2467 * behaviour that some programs depend on. We mark the "original" 2468 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2469 * See vm_normal_page() for details. 2470 */ 2471 if (is_cow_mapping(vma->vm_flags)) { 2472 if (addr != vma->vm_start || end != vma->vm_end) 2473 return -EINVAL; 2474 vma->vm_pgoff = pfn; 2475 } 2476 2477 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); 2478 2479 BUG_ON(addr >= end); 2480 pfn -= addr >> PAGE_SHIFT; 2481 pgd = pgd_offset(mm, addr); 2482 flush_cache_range(vma, addr, end); 2483 do { 2484 next = pgd_addr_end(addr, end); 2485 err = remap_p4d_range(mm, pgd, addr, next, 2486 pfn + (addr >> PAGE_SHIFT), prot); 2487 if (err) 2488 return err; 2489 } while (pgd++, addr = next, addr != end); 2490 2491 return 0; 2492 } 2493 2494 /** 2495 * remap_pfn_range - remap kernel memory to userspace 2496 * @vma: user vma to map to 2497 * @addr: target page aligned user address to start at 2498 * @pfn: page frame number of kernel physical memory address 2499 * @size: size of mapping area 2500 * @prot: page protection flags for this mapping 2501 * 2502 * Note: this is only safe if the mm semaphore is held when called. 2503 * 2504 * Return: %0 on success, negative error code otherwise. 2505 */ 2506 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 2507 unsigned long pfn, unsigned long size, pgprot_t prot) 2508 { 2509 int err; 2510 2511 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); 2512 if (err) 2513 return -EINVAL; 2514 2515 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); 2516 if (err) 2517 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); 2518 return err; 2519 } 2520 EXPORT_SYMBOL(remap_pfn_range); 2521 2522 /** 2523 * vm_iomap_memory - remap memory to userspace 2524 * @vma: user vma to map to 2525 * @start: start of the physical memory to be mapped 2526 * @len: size of area 2527 * 2528 * This is a simplified io_remap_pfn_range() for common driver use. The 2529 * driver just needs to give us the physical memory range to be mapped, 2530 * we'll figure out the rest from the vma information. 2531 * 2532 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get 2533 * whatever write-combining details or similar. 2534 * 2535 * Return: %0 on success, negative error code otherwise. 2536 */ 2537 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 2538 { 2539 unsigned long vm_len, pfn, pages; 2540 2541 /* Check that the physical memory area passed in looks valid */ 2542 if (start + len < start) 2543 return -EINVAL; 2544 /* 2545 * You *really* shouldn't map things that aren't page-aligned, 2546 * but we've historically allowed it because IO memory might 2547 * just have smaller alignment. 2548 */ 2549 len += start & ~PAGE_MASK; 2550 pfn = start >> PAGE_SHIFT; 2551 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; 2552 if (pfn + pages < pfn) 2553 return -EINVAL; 2554 2555 /* We start the mapping 'vm_pgoff' pages into the area */ 2556 if (vma->vm_pgoff > pages) 2557 return -EINVAL; 2558 pfn += vma->vm_pgoff; 2559 pages -= vma->vm_pgoff; 2560 2561 /* Can we fit all of the mapping? 
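 * For reference, the typical caller of vm_iomap_memory() is a driver
 * ->mmap() handler handing over a physical aperture (sketch with
 * hypothetical identifiers; dev->bar_phys/dev->bar_len come from the
 * device):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_dev *dev = file->private_data;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, dev->bar_phys, dev->bar_len);
 *	}
 *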
*/ 2562 vm_len = vma->vm_end - vma->vm_start; 2563 if (vm_len >> PAGE_SHIFT > pages) 2564 return -EINVAL; 2565 2566 /* Ok, let it rip */ 2567 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 2568 } 2569 EXPORT_SYMBOL(vm_iomap_memory); 2570 2571 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 2572 unsigned long addr, unsigned long end, 2573 pte_fn_t fn, void *data, bool create, 2574 pgtbl_mod_mask *mask) 2575 { 2576 pte_t *pte, *mapped_pte; 2577 int err = 0; 2578 spinlock_t *ptl; 2579 2580 if (create) { 2581 mapped_pte = pte = (mm == &init_mm) ? 2582 pte_alloc_kernel_track(pmd, addr, mask) : 2583 pte_alloc_map_lock(mm, pmd, addr, &ptl); 2584 if (!pte) 2585 return -ENOMEM; 2586 } else { 2587 mapped_pte = pte = (mm == &init_mm) ? 2588 pte_offset_kernel(pmd, addr) : 2589 pte_offset_map_lock(mm, pmd, addr, &ptl); 2590 if (!pte) 2591 return -EINVAL; 2592 } 2593 2594 arch_enter_lazy_mmu_mode(); 2595 2596 if (fn) { 2597 do { 2598 if (create || !pte_none(ptep_get(pte))) { 2599 err = fn(pte++, addr, data); 2600 if (err) 2601 break; 2602 } 2603 } while (addr += PAGE_SIZE, addr != end); 2604 } 2605 *mask |= PGTBL_PTE_MODIFIED; 2606 2607 arch_leave_lazy_mmu_mode(); 2608 2609 if (mm != &init_mm) 2610 pte_unmap_unlock(mapped_pte, ptl); 2611 return err; 2612 } 2613 2614 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 2615 unsigned long addr, unsigned long end, 2616 pte_fn_t fn, void *data, bool create, 2617 pgtbl_mod_mask *mask) 2618 { 2619 pmd_t *pmd; 2620 unsigned long next; 2621 int err = 0; 2622 2623 BUG_ON(pud_huge(*pud)); 2624 2625 if (create) { 2626 pmd = pmd_alloc_track(mm, pud, addr, mask); 2627 if (!pmd) 2628 return -ENOMEM; 2629 } else { 2630 pmd = pmd_offset(pud, addr); 2631 } 2632 do { 2633 next = pmd_addr_end(addr, end); 2634 if (pmd_none(*pmd) && !create) 2635 continue; 2636 if (WARN_ON_ONCE(pmd_leaf(*pmd))) 2637 return -EINVAL; 2638 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { 2639 if (!create) 2640 continue; 2641 pmd_clear_bad(pmd); 2642 } 2643 err = apply_to_pte_range(mm, pmd, addr, next, 2644 fn, data, create, mask); 2645 if (err) 2646 break; 2647 } while (pmd++, addr = next, addr != end); 2648 2649 return err; 2650 } 2651 2652 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, 2653 unsigned long addr, unsigned long end, 2654 pte_fn_t fn, void *data, bool create, 2655 pgtbl_mod_mask *mask) 2656 { 2657 pud_t *pud; 2658 unsigned long next; 2659 int err = 0; 2660 2661 if (create) { 2662 pud = pud_alloc_track(mm, p4d, addr, mask); 2663 if (!pud) 2664 return -ENOMEM; 2665 } else { 2666 pud = pud_offset(p4d, addr); 2667 } 2668 do { 2669 next = pud_addr_end(addr, end); 2670 if (pud_none(*pud) && !create) 2671 continue; 2672 if (WARN_ON_ONCE(pud_leaf(*pud))) 2673 return -EINVAL; 2674 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) { 2675 if (!create) 2676 continue; 2677 pud_clear_bad(pud); 2678 } 2679 err = apply_to_pmd_range(mm, pud, addr, next, 2680 fn, data, create, mask); 2681 if (err) 2682 break; 2683 } while (pud++, addr = next, addr != end); 2684 2685 return err; 2686 } 2687 2688 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2689 unsigned long addr, unsigned long end, 2690 pte_fn_t fn, void *data, bool create, 2691 pgtbl_mod_mask *mask) 2692 { 2693 p4d_t *p4d; 2694 unsigned long next; 2695 int err = 0; 2696 2697 if (create) { 2698 p4d = p4d_alloc_track(mm, pgd, addr, mask); 2699 if (!p4d) 2700 return -ENOMEM; 2701 } else { 2702 p4d = p4d_offset(pgd, addr); 2703 } 2704 do { 2705 next 
= p4d_addr_end(addr, end); 2706 if (p4d_none(*p4d) && !create) 2707 continue; 2708 if (WARN_ON_ONCE(p4d_leaf(*p4d))) 2709 return -EINVAL; 2710 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) { 2711 if (!create) 2712 continue; 2713 p4d_clear_bad(p4d); 2714 } 2715 err = apply_to_pud_range(mm, p4d, addr, next, 2716 fn, data, create, mask); 2717 if (err) 2718 break; 2719 } while (p4d++, addr = next, addr != end); 2720 2721 return err; 2722 } 2723 2724 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2725 unsigned long size, pte_fn_t fn, 2726 void *data, bool create) 2727 { 2728 pgd_t *pgd; 2729 unsigned long start = addr, next; 2730 unsigned long end = addr + size; 2731 pgtbl_mod_mask mask = 0; 2732 int err = 0; 2733 2734 if (WARN_ON(addr >= end)) 2735 return -EINVAL; 2736 2737 pgd = pgd_offset(mm, addr); 2738 do { 2739 next = pgd_addr_end(addr, end); 2740 if (pgd_none(*pgd) && !create) 2741 continue; 2742 if (WARN_ON_ONCE(pgd_leaf(*pgd))) 2743 return -EINVAL; 2744 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { 2745 if (!create) 2746 continue; 2747 pgd_clear_bad(pgd); 2748 } 2749 err = apply_to_p4d_range(mm, pgd, addr, next, 2750 fn, data, create, &mask); 2751 if (err) 2752 break; 2753 } while (pgd++, addr = next, addr != end); 2754 2755 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 2756 arch_sync_kernel_mappings(start, start + size); 2757 2758 return err; 2759 } 2760 2761 /* 2762 * Scan a region of virtual memory, filling in page tables as necessary 2763 * and calling a provided function on each leaf page table. 2764 */ 2765 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2766 unsigned long size, pte_fn_t fn, void *data) 2767 { 2768 return __apply_to_page_range(mm, addr, size, fn, data, true); 2769 } 2770 EXPORT_SYMBOL_GPL(apply_to_page_range); 2771 2772 /* 2773 * Scan a region of virtual memory, calling a provided function on 2774 * each leaf page table where it exists. 2775 * 2776 * Unlike apply_to_page_range, this does _not_ fill in page tables 2777 * where they are absent. 2778 */ 2779 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, 2780 unsigned long size, pte_fn_t fn, void *data) 2781 { 2782 return __apply_to_page_range(mm, addr, size, fn, data, false); 2783 } 2784 EXPORT_SYMBOL_GPL(apply_to_existing_page_range); 2785 2786 /* 2787 * handle_pte_fault chooses page fault handler according to an entry which was 2788 * read non-atomically. Before making any commitment, on those architectures 2789 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched 2790 * parts, do_swap_page must check under lock before unmapping the pte and 2791 * proceeding (but do_wp_page is only called after already making such a check; 2792 * and do_anonymous_page can safely check later on). 
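 *
 * The pattern the fault handlers follow is, roughly (sketch only):
 *
 *	pte_t orig = ptep_get_lockless(pte);	// may be a torn read on PAE
 *	... decide what to do based on orig ...
 *	spin_lock(ptl);
 *	if (!pte_same(ptep_get(pte), orig))	// re-check under the lock
 *		goto changed;			// someone else handled it
 *	... commit the change ...
 *	spin_unlock(ptl);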
2793 */ 2794 static inline int pte_unmap_same(struct vm_fault *vmf) 2795 { 2796 int same = 1; 2797 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) 2798 if (sizeof(pte_t) > sizeof(unsigned long)) { 2799 spin_lock(vmf->ptl); 2800 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); 2801 spin_unlock(vmf->ptl); 2802 } 2803 #endif 2804 pte_unmap(vmf->pte); 2805 vmf->pte = NULL; 2806 return same; 2807 } 2808 2809 /* 2810 * Return: 2811 * 0: copy succeeded 2812 * -EHWPOISON: copy failed due to hwpoison in source page 2813 * -EAGAIN: copy failed (some other reason) 2814 */ 2815 static inline int __wp_page_copy_user(struct page *dst, struct page *src, 2816 struct vm_fault *vmf) 2817 { 2818 int ret; 2819 void *kaddr; 2820 void __user *uaddr; 2821 struct vm_area_struct *vma = vmf->vma; 2822 struct mm_struct *mm = vma->vm_mm; 2823 unsigned long addr = vmf->address; 2824 2825 if (likely(src)) { 2826 if (copy_mc_user_highpage(dst, src, addr, vma)) { 2827 memory_failure_queue(page_to_pfn(src), 0); 2828 return -EHWPOISON; 2829 } 2830 return 0; 2831 } 2832 2833 /* 2834 * If the source page was a PFN mapping, we don't have 2835 * a "struct page" for it. We do a best-effort copy by 2836 * just copying from the original user address. If that 2837 * fails, we just zero-fill it. Live with it. 2838 */ 2839 kaddr = kmap_atomic(dst); 2840 uaddr = (void __user *)(addr & PAGE_MASK); 2841 2842 /* 2843 * On architectures with software "accessed" bits, we would 2844 * take a double page fault, so mark it accessed here. 2845 */ 2846 vmf->pte = NULL; 2847 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { 2848 pte_t entry; 2849 2850 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); 2851 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 2852 /* 2853 * Another thread has already handled the fault; 2854 * just update the local TLB. 2855 */ 2856 if (vmf->pte) 2857 update_mmu_tlb(vma, addr, vmf->pte); 2858 ret = -EAGAIN; 2859 goto pte_unlock; 2860 } 2861 2862 entry = pte_mkyoung(vmf->orig_pte); 2863 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) 2864 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); 2865 } 2866 2867 /* 2868 * This really shouldn't fail, because the page is there 2869 * in the page tables. But it might just be unreadable, 2870 * in which case we just give up and fill the result with 2871 * zeroes. 2872 */ 2873 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 2874 if (vmf->pte) 2875 goto warn; 2876 2877 /* Re-validate under PTL if the page is still mapped */ 2878 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); 2879 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 2880 /* The PTE changed under us, update local tlb */ 2881 if (vmf->pte) 2882 update_mmu_tlb(vma, addr, vmf->pte); 2883 ret = -EAGAIN; 2884 goto pte_unlock; 2885 } 2886 2887 /* 2888 * The same page can have been mapped back since the last copy attempt. 2889 * Try to copy again under PTL.
2890 */ 2891 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 2892 /* 2893 * Give a warn in case there can be some obscure 2894 * use-case 2895 */ 2896 warn: 2897 WARN_ON_ONCE(1); 2898 clear_page(kaddr); 2899 } 2900 } 2901 2902 ret = 0; 2903 2904 pte_unlock: 2905 if (vmf->pte) 2906 pte_unmap_unlock(vmf->pte, vmf->ptl); 2907 kunmap_atomic(kaddr); 2908 flush_dcache_page(dst); 2909 2910 return ret; 2911 } 2912 2913 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) 2914 { 2915 struct file *vm_file = vma->vm_file; 2916 2917 if (vm_file) 2918 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; 2919 2920 /* 2921 * Special mappings (e.g. VDSO) do not have any file so fake 2922 * a default GFP_KERNEL for them. 2923 */ 2924 return GFP_KERNEL; 2925 } 2926 2927 /* 2928 * Notify the address space that the page is about to become writable so that 2929 * it can prohibit this or wait for the page to get into an appropriate state. 2930 * 2931 * We do this without the lock held, so that it can sleep if it needs to. 2932 */ 2933 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) 2934 { 2935 vm_fault_t ret; 2936 unsigned int old_flags = vmf->flags; 2937 2938 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 2939 2940 if (vmf->vma->vm_file && 2941 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) 2942 return VM_FAULT_SIGBUS; 2943 2944 ret = vmf->vma->vm_ops->page_mkwrite(vmf); 2945 /* Restore original flags so that caller is not surprised */ 2946 vmf->flags = old_flags; 2947 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 2948 return ret; 2949 if (unlikely(!(ret & VM_FAULT_LOCKED))) { 2950 folio_lock(folio); 2951 if (!folio->mapping) { 2952 folio_unlock(folio); 2953 return 0; /* retry */ 2954 } 2955 ret |= VM_FAULT_LOCKED; 2956 } else 2957 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2958 return ret; 2959 } 2960 2961 /* 2962 * Handle dirtying of a page in shared file mapping on a write fault. 2963 * 2964 * The function expects the page to be locked and unlocks it. 2965 */ 2966 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) 2967 { 2968 struct vm_area_struct *vma = vmf->vma; 2969 struct address_space *mapping; 2970 struct folio *folio = page_folio(vmf->page); 2971 bool dirtied; 2972 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; 2973 2974 dirtied = folio_mark_dirty(folio); 2975 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio); 2976 /* 2977 * Take a local copy of the address_space - folio.mapping may be zeroed 2978 * by truncate after folio_unlock(). The address_space itself remains 2979 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s 2980 * release semantics to prevent the compiler from undoing this copying. 2981 */ 2982 mapping = folio_raw_mapping(folio); 2983 folio_unlock(folio); 2984 2985 if (!page_mkwrite) 2986 file_update_time(vma->vm_file); 2987 2988 /* 2989 * Throttle page dirtying rate down to writeback speed. 2990 * 2991 * mapping may be NULL here because some device drivers do not 2992 * set page.mapping but still dirty their pages 2993 * 2994 * Drop the mmap_lock before waiting on IO, if we can. The file 2995 * is pinning the mapping, as per above. 
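 *
 * The ->page_mkwrite() side that feeds into this is supplied by the
 * filesystem or driver; a minimal sketch (hypothetical identifiers, no
 * real filesystem logic):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *
 *		folio_lock(folio);
 *		if (!folio->mapping) {		// raced with truncate
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		// ... make the folio writable / reserve blocks here ...
 *		return VM_FAULT_LOCKED;		// returned with folio locked
 *	}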
2996 */ 2997 if ((dirtied || page_mkwrite) && mapping) { 2998 struct file *fpin; 2999 3000 fpin = maybe_unlock_mmap_for_io(vmf, NULL); 3001 balance_dirty_pages_ratelimited(mapping); 3002 if (fpin) { 3003 fput(fpin); 3004 return VM_FAULT_COMPLETED; 3005 } 3006 } 3007 3008 return 0; 3009 } 3010 3011 /* 3012 * Handle write page faults for pages that can be reused in the current vma 3013 * 3014 * This can happen either due to the mapping being with the VM_SHARED flag, 3015 * or due to us being the last reference standing to the page. In either 3016 * case, all we need to do here is to mark the page as writable and update 3017 * any related book-keeping. 3018 */ 3019 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) 3020 __releases(vmf->ptl) 3021 { 3022 struct vm_area_struct *vma = vmf->vma; 3023 pte_t entry; 3024 3025 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); 3026 3027 if (folio) { 3028 VM_BUG_ON(folio_test_anon(folio) && 3029 !PageAnonExclusive(vmf->page)); 3030 /* 3031 * Clear the folio's cpupid information as the existing 3032 * information potentially belongs to a now completely 3033 * unrelated process. 3034 */ 3035 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); 3036 } 3037 3038 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3039 entry = pte_mkyoung(vmf->orig_pte); 3040 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3041 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) 3042 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 3043 pte_unmap_unlock(vmf->pte, vmf->ptl); 3044 count_vm_event(PGREUSE); 3045 } 3046 3047 /* 3048 * We could add a bitflag somewhere, but for now, we know that all 3049 * vm_ops that have a ->map_pages have been audited and don't need 3050 * the mmap_lock to be held. 3051 */ 3052 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) 3053 { 3054 struct vm_area_struct *vma = vmf->vma; 3055 3056 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) 3057 return 0; 3058 vma_end_read(vma); 3059 return VM_FAULT_RETRY; 3060 } 3061 3062 static vm_fault_t vmf_anon_prepare(struct vm_fault *vmf) 3063 { 3064 struct vm_area_struct *vma = vmf->vma; 3065 3066 if (likely(vma->anon_vma)) 3067 return 0; 3068 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { 3069 vma_end_read(vma); 3070 return VM_FAULT_RETRY; 3071 } 3072 if (__anon_vma_prepare(vma)) 3073 return VM_FAULT_OOM; 3074 return 0; 3075 } 3076 3077 /* 3078 * Handle the case of a page which we actually need to copy to a new page, 3079 * either due to COW or unsharing. 3080 * 3081 * Called with mmap_lock locked and the old page referenced, but 3082 * without the ptl held. 3083 * 3084 * High level logic flow: 3085 * 3086 * - Allocate a page, copy the content of the old page to the new one. 3087 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. 3088 * - Take the PTL. If the pte changed, bail out and release the allocated page 3089 * - If the pte is still the way we remember it, update the page table and all 3090 * relevant references. This includes dropping the reference the page-table 3091 * held to the old page, as well as updating the rmap. 3092 * - In any case, unlock the PTL and drop the reference we took to the old page. 
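 *
 * The mmu-notifier bookkeeping mentioned above follows the usual bracket
 * pattern (sketch; see the function body below for the real thing):
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
 *				addr & PAGE_MASK,
 *				(addr & PAGE_MASK) + PAGE_SIZE);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... take the PTL and switch the pte over ...
 *	mmu_notifier_invalidate_range_end(&range);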
3093 */ 3094 static vm_fault_t wp_page_copy(struct vm_fault *vmf) 3095 { 3096 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3097 struct vm_area_struct *vma = vmf->vma; 3098 struct mm_struct *mm = vma->vm_mm; 3099 struct folio *old_folio = NULL; 3100 struct folio *new_folio = NULL; 3101 pte_t entry; 3102 int page_copied = 0; 3103 struct mmu_notifier_range range; 3104 vm_fault_t ret; 3105 3106 delayacct_wpcopy_start(); 3107 3108 if (vmf->page) 3109 old_folio = page_folio(vmf->page); 3110 ret = vmf_anon_prepare(vmf); 3111 if (unlikely(ret)) 3112 goto out; 3113 3114 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { 3115 new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); 3116 if (!new_folio) 3117 goto oom; 3118 } else { 3119 int err; 3120 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, 3121 vmf->address, false); 3122 if (!new_folio) 3123 goto oom; 3124 3125 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); 3126 if (err) { 3127 /* 3128 * COW failed, if the fault was solved by other, 3129 * it's fine. If not, userspace would re-fault on 3130 * the same address and we will handle the fault 3131 * from the second attempt. 3132 * The -EHWPOISON case will not be retried. 3133 */ 3134 folio_put(new_folio); 3135 if (old_folio) 3136 folio_put(old_folio); 3137 3138 delayacct_wpcopy_end(); 3139 return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0; 3140 } 3141 kmsan_copy_page_meta(&new_folio->page, vmf->page); 3142 } 3143 3144 if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL)) 3145 goto oom_free_new; 3146 folio_throttle_swaprate(new_folio, GFP_KERNEL); 3147 3148 __folio_mark_uptodate(new_folio); 3149 3150 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 3151 vmf->address & PAGE_MASK, 3152 (vmf->address & PAGE_MASK) + PAGE_SIZE); 3153 mmu_notifier_invalidate_range_start(&range); 3154 3155 /* 3156 * Re-check the pte - we dropped the lock 3157 */ 3158 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); 3159 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 3160 if (old_folio) { 3161 if (!folio_test_anon(old_folio)) { 3162 dec_mm_counter(mm, mm_counter_file(&old_folio->page)); 3163 inc_mm_counter(mm, MM_ANONPAGES); 3164 } 3165 } else { 3166 ksm_might_unmap_zero_page(mm, vmf->orig_pte); 3167 inc_mm_counter(mm, MM_ANONPAGES); 3168 } 3169 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3170 entry = mk_pte(&new_folio->page, vma->vm_page_prot); 3171 entry = pte_sw_mkyoung(entry); 3172 if (unlikely(unshare)) { 3173 if (pte_soft_dirty(vmf->orig_pte)) 3174 entry = pte_mksoft_dirty(entry); 3175 if (pte_uffd_wp(vmf->orig_pte)) 3176 entry = pte_mkuffd_wp(entry); 3177 } else { 3178 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3179 } 3180 3181 /* 3182 * Clear the pte entry and flush it first, before updating the 3183 * pte with the new entry, to keep TLBs on different CPUs in 3184 * sync. This code used to set the new PTE then flush TLBs, but 3185 * that left a window where the new PTE could be loaded into 3186 * some TLBs while the old PTE remains in others. 3187 */ 3188 ptep_clear_flush(vma, vmf->address, vmf->pte); 3189 folio_add_new_anon_rmap(new_folio, vma, vmf->address); 3190 folio_add_lru_vma(new_folio, vma); 3191 /* 3192 * We call the notify macro here because, when using secondary 3193 * mmu page tables (such as kvm shadow page tables), we want the 3194 * new page to be mapped directly into the secondary page table. 
3195 */ 3196 BUG_ON(unshare && pte_write(entry)); 3197 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); 3198 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 3199 if (old_folio) { 3200 /* 3201 * Only after switching the pte to the new page may 3202 * we remove the mapcount here. Otherwise another 3203 * process may come and find the rmap count decremented 3204 * before the pte is switched to the new page, and 3205 * "reuse" the old page writing into it while our pte 3206 * here still points into it and can be read by other 3207 * threads. 3208 * 3209 * The critical issue is to order this 3210 * page_remove_rmap with the ptp_clear_flush above. 3211 * Those stores are ordered by (if nothing else,) 3212 * the barrier present in the atomic_add_negative 3213 * in page_remove_rmap. 3214 * 3215 * Then the TLB flush in ptep_clear_flush ensures that 3216 * no process can access the old page before the 3217 * decremented mapcount is visible. And the old page 3218 * cannot be reused until after the decremented 3219 * mapcount is visible. So transitively, TLBs to 3220 * old page will be flushed before it can be reused. 3221 */ 3222 page_remove_rmap(vmf->page, vma, false); 3223 } 3224 3225 /* Free the old page.. */ 3226 new_folio = old_folio; 3227 page_copied = 1; 3228 pte_unmap_unlock(vmf->pte, vmf->ptl); 3229 } else if (vmf->pte) { 3230 update_mmu_tlb(vma, vmf->address, vmf->pte); 3231 pte_unmap_unlock(vmf->pte, vmf->ptl); 3232 } 3233 3234 mmu_notifier_invalidate_range_end(&range); 3235 3236 if (new_folio) 3237 folio_put(new_folio); 3238 if (old_folio) { 3239 if (page_copied) 3240 free_swap_cache(&old_folio->page); 3241 folio_put(old_folio); 3242 } 3243 3244 delayacct_wpcopy_end(); 3245 return 0; 3246 oom_free_new: 3247 folio_put(new_folio); 3248 oom: 3249 ret = VM_FAULT_OOM; 3250 out: 3251 if (old_folio) 3252 folio_put(old_folio); 3253 3254 delayacct_wpcopy_end(); 3255 return ret; 3256 } 3257 3258 /** 3259 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE 3260 * writeable once the page is prepared 3261 * 3262 * @vmf: structure describing the fault 3263 * @folio: the folio of vmf->page 3264 * 3265 * This function handles all that is needed to finish a write page fault in a 3266 * shared mapping due to PTE being read-only once the mapped page is prepared. 3267 * It handles locking of PTE and modifying it. 3268 * 3269 * The function expects the page to be locked or other protection against 3270 * concurrent faults / writeback (such as DAX radix tree locks). 3271 * 3272 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before 3273 * we acquired PTE lock. 3274 */ 3275 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) 3276 { 3277 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); 3278 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, 3279 &vmf->ptl); 3280 if (!vmf->pte) 3281 return VM_FAULT_NOPAGE; 3282 /* 3283 * We might have raced with another page fault while we released the 3284 * pte_offset_map_lock. 
3285 */ 3286 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { 3287 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 3288 pte_unmap_unlock(vmf->pte, vmf->ptl); 3289 return VM_FAULT_NOPAGE; 3290 } 3291 wp_page_reuse(vmf, folio); 3292 return 0; 3293 } 3294 3295 /* 3296 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 3297 * mapping 3298 */ 3299 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) 3300 { 3301 struct vm_area_struct *vma = vmf->vma; 3302 3303 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { 3304 vm_fault_t ret; 3305 3306 pte_unmap_unlock(vmf->pte, vmf->ptl); 3307 ret = vmf_can_call_fault(vmf); 3308 if (ret) 3309 return ret; 3310 3311 vmf->flags |= FAULT_FLAG_MKWRITE; 3312 ret = vma->vm_ops->pfn_mkwrite(vmf); 3313 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) 3314 return ret; 3315 return finish_mkwrite_fault(vmf, NULL); 3316 } 3317 wp_page_reuse(vmf, NULL); 3318 return 0; 3319 } 3320 3321 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) 3322 __releases(vmf->ptl) 3323 { 3324 struct vm_area_struct *vma = vmf->vma; 3325 vm_fault_t ret = 0; 3326 3327 folio_get(folio); 3328 3329 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 3330 vm_fault_t tmp; 3331 3332 pte_unmap_unlock(vmf->pte, vmf->ptl); 3333 tmp = vmf_can_call_fault(vmf); 3334 if (tmp) { 3335 folio_put(folio); 3336 return tmp; 3337 } 3338 3339 tmp = do_page_mkwrite(vmf, folio); 3340 if (unlikely(!tmp || (tmp & 3341 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3342 folio_put(folio); 3343 return tmp; 3344 } 3345 tmp = finish_mkwrite_fault(vmf, folio); 3346 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 3347 folio_unlock(folio); 3348 folio_put(folio); 3349 return tmp; 3350 } 3351 } else { 3352 wp_page_reuse(vmf, folio); 3353 folio_lock(folio); 3354 } 3355 ret |= fault_dirty_shared_page(vmf); 3356 folio_put(folio); 3357 3358 return ret; 3359 } 3360 3361 static bool wp_can_reuse_anon_folio(struct folio *folio, 3362 struct vm_area_struct *vma) 3363 { 3364 /* 3365 * We have to verify under folio lock: these early checks are 3366 * just an optimization to avoid locking the folio and freeing 3367 * the swapcache if there is little hope that we can reuse. 3368 * 3369 * KSM doesn't necessarily raise the folio refcount. 3370 */ 3371 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) 3372 return false; 3373 if (!folio_test_lru(folio)) 3374 /* 3375 * We cannot easily detect+handle references from 3376 * remote LRU caches or references to LRU folios. 3377 */ 3378 lru_add_drain(); 3379 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) 3380 return false; 3381 if (!folio_trylock(folio)) 3382 return false; 3383 if (folio_test_swapcache(folio)) 3384 folio_free_swap(folio); 3385 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { 3386 folio_unlock(folio); 3387 return false; 3388 } 3389 /* 3390 * Ok, we've got the only folio reference from our mapping 3391 * and the folio is locked, it's dark out, and we're wearing 3392 * sunglasses. Hit it. 3393 */ 3394 folio_move_anon_rmap(folio, vma); 3395 folio_unlock(folio); 3396 return true; 3397 } 3398 3399 /* 3400 * This routine handles present pages, when 3401 * * users try to write to a shared page (FAULT_FLAG_WRITE) 3402 * * GUP wants to take a R/O pin on a possibly shared anonymous page 3403 * (FAULT_FLAG_UNSHARE) 3404 * 3405 * It is done by copying the page to a new address and decrementing the 3406 * shared-page counter for the old page. 
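 *
 * From userspace this is, for example, the path behind the first store to
 * a MAP_PRIVATE page that is still shared after fork() (illustrative
 * userspace sketch, not kernel code):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 0;		// populate the page before forking
 *	if (fork() == 0) {
 *		p[0] = 1;	// write fault -> do_wp_page() breaks COW
 *		_exit(0);
 *	}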
3407 * 3408 * Note that this routine assumes that the protection checks have been 3409 * done by the caller (the low-level page fault routine in most cases). 3410 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've 3411 * done any necessary COW. 3412 * 3413 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even 3414 * though the page will change only once the write actually happens. This 3415 * avoids a few races, and potentially makes it more efficient. 3416 * 3417 * We enter with non-exclusive mmap_lock (to exclude vma changes, 3418 * but allow concurrent faults), with pte both mapped and locked. 3419 * We return with mmap_lock still held, but pte unmapped and unlocked. 3420 */ 3421 static vm_fault_t do_wp_page(struct vm_fault *vmf) 3422 __releases(vmf->ptl) 3423 { 3424 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3425 struct vm_area_struct *vma = vmf->vma; 3426 struct folio *folio = NULL; 3427 pte_t pte; 3428 3429 if (likely(!unshare)) { 3430 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { 3431 if (!userfaultfd_wp_async(vma)) { 3432 pte_unmap_unlock(vmf->pte, vmf->ptl); 3433 return handle_userfault(vmf, VM_UFFD_WP); 3434 } 3435 3436 /* 3437 * Nothing needed (cache flush, TLB invalidations, 3438 * etc.) because we're only removing the uffd-wp bit, 3439 * which is completely invisible to the user. 3440 */ 3441 pte = pte_clear_uffd_wp(ptep_get(vmf->pte)); 3442 3443 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 3444 /* 3445 * Update this to be prepared for following up CoW 3446 * handling 3447 */ 3448 vmf->orig_pte = pte; 3449 } 3450 3451 /* 3452 * Userfaultfd write-protect can defer flushes. Ensure the TLB 3453 * is flushed in this case before copying. 3454 */ 3455 if (unlikely(userfaultfd_wp(vmf->vma) && 3456 mm_tlb_flush_pending(vmf->vma->vm_mm))) 3457 flush_tlb_page(vmf->vma, vmf->address); 3458 } 3459 3460 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); 3461 3462 if (vmf->page) 3463 folio = page_folio(vmf->page); 3464 3465 /* 3466 * Shared mapping: we are guaranteed to have VM_WRITE and 3467 * FAULT_FLAG_WRITE set at this point. 3468 */ 3469 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 3470 /* 3471 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a 3472 * VM_PFNMAP VMA. 3473 * 3474 * We should not cow pages in a shared writeable mapping. 3475 * Just mark the pages writable and/or call ops->pfn_mkwrite. 3476 */ 3477 if (!vmf->page) 3478 return wp_pfn_shared(vmf); 3479 return wp_page_shared(vmf, folio); 3480 } 3481 3482 /* 3483 * Private mapping: create an exclusive anonymous page copy if reuse 3484 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling. 3485 * 3486 * If we encounter a page that is marked exclusive, we must reuse 3487 * the page without further checks. 3488 */ 3489 if (folio && folio_test_anon(folio) && 3490 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { 3491 if (!PageAnonExclusive(vmf->page)) 3492 SetPageAnonExclusive(vmf->page); 3493 if (unlikely(unshare)) { 3494 pte_unmap_unlock(vmf->pte, vmf->ptl); 3495 return 0; 3496 } 3497 wp_page_reuse(vmf, folio); 3498 return 0; 3499 } 3500 /* 3501 * Ok, we need to copy. Oh, well.. 
3502 */ 3503 if (folio) 3504 folio_get(folio); 3505 3506 pte_unmap_unlock(vmf->pte, vmf->ptl); 3507 #ifdef CONFIG_KSM 3508 if (folio && folio_test_ksm(folio)) 3509 count_vm_event(COW_KSM); 3510 #endif 3511 return wp_page_copy(vmf); 3512 } 3513 3514 static void unmap_mapping_range_vma(struct vm_area_struct *vma, 3515 unsigned long start_addr, unsigned long end_addr, 3516 struct zap_details *details) 3517 { 3518 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); 3519 } 3520 3521 static inline void unmap_mapping_range_tree(struct rb_root_cached *root, 3522 pgoff_t first_index, 3523 pgoff_t last_index, 3524 struct zap_details *details) 3525 { 3526 struct vm_area_struct *vma; 3527 pgoff_t vba, vea, zba, zea; 3528 3529 vma_interval_tree_foreach(vma, root, first_index, last_index) { 3530 vba = vma->vm_pgoff; 3531 vea = vba + vma_pages(vma) - 1; 3532 zba = max(first_index, vba); 3533 zea = min(last_index, vea); 3534 3535 unmap_mapping_range_vma(vma, 3536 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 3537 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 3538 details); 3539 } 3540 } 3541 3542 /** 3543 * unmap_mapping_folio() - Unmap single folio from processes. 3544 * @folio: The locked folio to be unmapped. 3545 * 3546 * Unmap this folio from any userspace process which still has it mmaped. 3547 * Typically, for efficiency, the range of nearby pages has already been 3548 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once 3549 * truncation or invalidation holds the lock on a folio, it may find that 3550 * the page has been remapped again: and then uses unmap_mapping_folio() 3551 * to unmap it finally. 3552 */ 3553 void unmap_mapping_folio(struct folio *folio) 3554 { 3555 struct address_space *mapping = folio->mapping; 3556 struct zap_details details = { }; 3557 pgoff_t first_index; 3558 pgoff_t last_index; 3559 3560 VM_BUG_ON(!folio_test_locked(folio)); 3561 3562 first_index = folio->index; 3563 last_index = folio_next_index(folio) - 1; 3564 3565 details.even_cows = false; 3566 details.single_folio = folio; 3567 details.zap_flags = ZAP_FLAG_DROP_MARKER; 3568 3569 i_mmap_lock_read(mapping); 3570 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3571 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3572 last_index, &details); 3573 i_mmap_unlock_read(mapping); 3574 } 3575 3576 /** 3577 * unmap_mapping_pages() - Unmap pages from processes. 3578 * @mapping: The address space containing pages to be unmapped. 3579 * @start: Index of first page to be unmapped. 3580 * @nr: Number of pages to be unmapped. 0 to unmap to end of file. 3581 * @even_cows: Whether to unmap even private COWed pages. 3582 * 3583 * Unmap the pages in this address space from any userspace process which 3584 * has them mmaped. Generally, you want to remove COWed pages as well when 3585 * a file is being truncated, but not when invalidating pages from the page 3586 * cache. 
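 *
 * For example (sketch; 'inode' and the page-aligned 'newsize' are assumed
 * to come from the caller):
 *
 *	// truncation: private COWed copies must go away too
 *	unmap_mapping_pages(inode->i_mapping, newsize >> PAGE_SHIFT, 0, true);
 *
 *	// cache invalidation: leave private COWed copies alone
 *	unmap_mapping_pages(inode->i_mapping, start, nr, false);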
3587 */ 3588 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, 3589 pgoff_t nr, bool even_cows) 3590 { 3591 struct zap_details details = { }; 3592 pgoff_t first_index = start; 3593 pgoff_t last_index = start + nr - 1; 3594 3595 details.even_cows = even_cows; 3596 if (last_index < first_index) 3597 last_index = ULONG_MAX; 3598 3599 i_mmap_lock_read(mapping); 3600 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3601 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3602 last_index, &details); 3603 i_mmap_unlock_read(mapping); 3604 } 3605 EXPORT_SYMBOL_GPL(unmap_mapping_pages); 3606 3607 /** 3608 * unmap_mapping_range - unmap the portion of all mmaps in the specified 3609 * address_space corresponding to the specified byte range in the underlying 3610 * file. 3611 * 3612 * @mapping: the address space containing mmaps to be unmapped. 3613 * @holebegin: byte in first page to unmap, relative to the start of 3614 * the underlying file. This will be rounded down to a PAGE_SIZE 3615 * boundary. Note that this is different from truncate_pagecache(), which 3616 * must keep the partial page. In contrast, we must get rid of 3617 * partial pages. 3618 * @holelen: size of prospective hole in bytes. This will be rounded 3619 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 3620 * end of the file. 3621 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 3622 * but 0 when invalidating pagecache, don't throw away private data. 3623 */ 3624 void unmap_mapping_range(struct address_space *mapping, 3625 loff_t const holebegin, loff_t const holelen, int even_cows) 3626 { 3627 pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT; 3628 pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT; 3629 3630 /* Check for overflow. */ 3631 if (sizeof(holelen) > sizeof(hlen)) { 3632 long long holeend = 3633 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 3634 if (holeend & ~(long long)ULONG_MAX) 3635 hlen = ULONG_MAX - hba + 1; 3636 } 3637 3638 unmap_mapping_pages(mapping, hba, hlen, even_cows); 3639 } 3640 EXPORT_SYMBOL(unmap_mapping_range); 3641 3642 /* 3643 * Restore a potential device exclusive pte to a working pte entry 3644 */ 3645 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) 3646 { 3647 struct folio *folio = page_folio(vmf->page); 3648 struct vm_area_struct *vma = vmf->vma; 3649 struct mmu_notifier_range range; 3650 vm_fault_t ret; 3651 3652 /* 3653 * We need a reference to lock the folio because we don't hold 3654 * the PTL so a racing thread can remove the device-exclusive 3655 * entry and unmap it. If the folio is free the entry must 3656 * have been removed already. If it happens to have already 3657 * been re-allocated after being freed all we do is lock and 3658 * unlock it. 
3659 */
3660 if (!folio_try_get(folio))
3661 return 0;
3662
3663 ret = folio_lock_or_retry(folio, vmf);
3664 if (ret) {
3665 folio_put(folio);
3666 return ret;
3667 }
3668 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3669 vma->vm_mm, vmf->address & PAGE_MASK,
3670 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3671 mmu_notifier_invalidate_range_start(&range);
3672
3673 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3674 &vmf->ptl);
3675 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3676 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
3677
3678 if (vmf->pte)
3679 pte_unmap_unlock(vmf->pte, vmf->ptl);
3680 folio_unlock(folio);
3681 folio_put(folio);
3682
3683 mmu_notifier_invalidate_range_end(&range);
3684 return 0;
3685 }
3686
3687 static inline bool should_try_to_free_swap(struct folio *folio,
3688 struct vm_area_struct *vma,
3689 unsigned int fault_flags)
3690 {
3691 if (!folio_test_swapcache(folio))
3692 return false;
3693 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
3694 folio_test_mlocked(folio))
3695 return true;
3696 /*
3697 * If we want to map a page that's in the swapcache writable, we
3698 * have to detect via the refcount if we're really the exclusive
3699 * user. Try freeing the swapcache to get rid of the swapcache
3700 * reference only in case it's likely that we'll be the exclusive user.
3701 */
3702 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
3703 folio_ref_count(folio) == 2;
3704 }
3705
3706 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3707 {
3708 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3709 vmf->address, &vmf->ptl);
3710 if (!vmf->pte)
3711 return 0;
3712 /*
3713 * Be careful so that we will only recover a special uffd-wp pte into a
3714 * none pte. Otherwise it means the pte could have changed, so retry.
3715 *
3716 * This should also cover the case where e.g. the pte changed
3717 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
3718 * So the is_pte_marker() check is not enough to safely drop the pte.
3719 */
3720 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
3721 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3722 pte_unmap_unlock(vmf->pte, vmf->ptl);
3723 return 0;
3724 }
3725
3726 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
3727 {
3728 if (vma_is_anonymous(vmf->vma))
3729 return do_anonymous_page(vmf);
3730 else
3731 return do_fault(vmf);
3732 }
3733
3734 /*
3735 * This is actually a page-missing access, but with uffd-wp special pte
3736 * installed. It means this pte was wr-protected before being unmapped.
3737 */
3738 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
3739 {
3740 /*
3741 * Just in case there are leftover special ptes even after the region
3742 * got unregistered - we can simply clear them.
3743 */
3744 if (unlikely(!userfaultfd_wp(vmf->vma)))
3745 return pte_marker_clear(vmf);
3746
3747 return do_pte_missing(vmf);
3748 }
3749
3750 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
3751 {
3752 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
3753 unsigned long marker = pte_marker_get(entry);
3754
3755 /*
3756 * PTE markers should never be empty. If anything weird happened,
3757 * the best thing to do is to kill the process along with its mm.
3758 */ 3759 if (WARN_ON_ONCE(!marker)) 3760 return VM_FAULT_SIGBUS; 3761 3762 /* Higher priority than uffd-wp when data corrupted */ 3763 if (marker & PTE_MARKER_POISONED) 3764 return VM_FAULT_HWPOISON; 3765 3766 if (pte_marker_entry_uffd_wp(entry)) 3767 return pte_marker_handle_uffd_wp(vmf); 3768 3769 /* This is an unknown pte marker */ 3770 return VM_FAULT_SIGBUS; 3771 } 3772 3773 /* 3774 * We enter with non-exclusive mmap_lock (to exclude vma changes, 3775 * but allow concurrent faults), and pte mapped but not yet locked. 3776 * We return with pte unmapped and unlocked. 3777 * 3778 * We return with the mmap_lock locked or unlocked in the same cases 3779 * as does filemap_fault(). 3780 */ 3781 vm_fault_t do_swap_page(struct vm_fault *vmf) 3782 { 3783 struct vm_area_struct *vma = vmf->vma; 3784 struct folio *swapcache, *folio = NULL; 3785 struct page *page; 3786 struct swap_info_struct *si = NULL; 3787 rmap_t rmap_flags = RMAP_NONE; 3788 bool exclusive = false; 3789 swp_entry_t entry; 3790 pte_t pte; 3791 vm_fault_t ret = 0; 3792 void *shadow = NULL; 3793 3794 if (!pte_unmap_same(vmf)) 3795 goto out; 3796 3797 entry = pte_to_swp_entry(vmf->orig_pte); 3798 if (unlikely(non_swap_entry(entry))) { 3799 if (is_migration_entry(entry)) { 3800 migration_entry_wait(vma->vm_mm, vmf->pmd, 3801 vmf->address); 3802 } else if (is_device_exclusive_entry(entry)) { 3803 vmf->page = pfn_swap_entry_to_page(entry); 3804 ret = remove_device_exclusive_entry(vmf); 3805 } else if (is_device_private_entry(entry)) { 3806 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { 3807 /* 3808 * migrate_to_ram is not yet ready to operate 3809 * under VMA lock. 3810 */ 3811 vma_end_read(vma); 3812 ret = VM_FAULT_RETRY; 3813 goto out; 3814 } 3815 3816 vmf->page = pfn_swap_entry_to_page(entry); 3817 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 3818 vmf->address, &vmf->ptl); 3819 if (unlikely(!vmf->pte || 3820 !pte_same(ptep_get(vmf->pte), 3821 vmf->orig_pte))) 3822 goto unlock; 3823 3824 /* 3825 * Get a page reference while we know the page can't be 3826 * freed. 3827 */ 3828 get_page(vmf->page); 3829 pte_unmap_unlock(vmf->pte, vmf->ptl); 3830 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); 3831 put_page(vmf->page); 3832 } else if (is_hwpoison_entry(entry)) { 3833 ret = VM_FAULT_HWPOISON; 3834 } else if (is_pte_marker_entry(entry)) { 3835 ret = handle_pte_marker(vmf); 3836 } else { 3837 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); 3838 ret = VM_FAULT_SIGBUS; 3839 } 3840 goto out; 3841 } 3842 3843 /* Prevent swapoff from happening to us. 
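 * get_swap_device() pins the swap_info_struct so the entry stays valid;
 * it is paired with put_swap_device() on every exit path below.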
*/ 3844 si = get_swap_device(entry); 3845 if (unlikely(!si)) 3846 goto out; 3847 3848 folio = swap_cache_get_folio(entry, vma, vmf->address); 3849 if (folio) 3850 page = folio_file_page(folio, swp_offset(entry)); 3851 swapcache = folio; 3852 3853 if (!folio) { 3854 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && 3855 __swap_count(entry) == 1) { 3856 /* skip swapcache */ 3857 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, 3858 vma, vmf->address, false); 3859 page = &folio->page; 3860 if (folio) { 3861 __folio_set_locked(folio); 3862 __folio_set_swapbacked(folio); 3863 3864 if (mem_cgroup_swapin_charge_folio(folio, 3865 vma->vm_mm, GFP_KERNEL, 3866 entry)) { 3867 ret = VM_FAULT_OOM; 3868 goto out_page; 3869 } 3870 mem_cgroup_swapin_uncharge_swap(entry); 3871 3872 shadow = get_shadow_from_swap_cache(entry); 3873 if (shadow) 3874 workingset_refault(folio, shadow); 3875 3876 folio_add_lru(folio); 3877 3878 /* To provide entry to swap_readpage() */ 3879 folio->swap = entry; 3880 swap_readpage(page, true, NULL); 3881 folio->private = NULL; 3882 } 3883 } else { 3884 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, 3885 vmf); 3886 if (page) 3887 folio = page_folio(page); 3888 swapcache = folio; 3889 } 3890 3891 if (!folio) { 3892 /* 3893 * Back out if somebody else faulted in this pte 3894 * while we released the pte lock. 3895 */ 3896 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 3897 vmf->address, &vmf->ptl); 3898 if (likely(vmf->pte && 3899 pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 3900 ret = VM_FAULT_OOM; 3901 goto unlock; 3902 } 3903 3904 /* Had to read the page from swap area: Major fault */ 3905 ret = VM_FAULT_MAJOR; 3906 count_vm_event(PGMAJFAULT); 3907 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 3908 } else if (PageHWPoison(page)) { 3909 /* 3910 * hwpoisoned dirty swapcache pages are kept for killing 3911 * owner processes (which may be unknown at hwpoison time) 3912 */ 3913 ret = VM_FAULT_HWPOISON; 3914 goto out_release; 3915 } 3916 3917 ret |= folio_lock_or_retry(folio, vmf); 3918 if (ret & VM_FAULT_RETRY) 3919 goto out_release; 3920 3921 if (swapcache) { 3922 /* 3923 * Make sure folio_free_swap() or swapoff did not release the 3924 * swapcache from under us. The page pin, and pte_same test 3925 * below, are not enough to exclude that. Even if it is still 3926 * swapcache, we need to check that the page's swap has not 3927 * changed. 3928 */ 3929 if (unlikely(!folio_test_swapcache(folio) || 3930 page_swap_entry(page).val != entry.val)) 3931 goto out_page; 3932 3933 /* 3934 * KSM sometimes has to copy on read faults, for example, if 3935 * page->index of !PageKSM() pages would be nonlinear inside the 3936 * anon VMA -- PageKSM() is lost on actual swapout. 3937 */ 3938 page = ksm_might_need_to_copy(page, vma, vmf->address); 3939 if (unlikely(!page)) { 3940 ret = VM_FAULT_OOM; 3941 goto out_page; 3942 } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) { 3943 ret = VM_FAULT_HWPOISON; 3944 goto out_page; 3945 } 3946 folio = page_folio(page); 3947 3948 /* 3949 * If we want to map a page that's in the swapcache writable, we 3950 * have to detect via the refcount if we're really the exclusive 3951 * owner. Try removing the extra reference from the local LRU 3952 * caches if required. 3953 */ 3954 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && 3955 !folio_test_ksm(folio) && !folio_test_lru(folio)) 3956 lru_add_drain(); 3957 } 3958 3959 folio_throttle_swaprate(folio, GFP_KERNEL); 3960 3961 /* 3962 * Back out if somebody else already faulted in this pte. 
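 * Re-take the PTE lock and re-check against orig_pte: any change means
 * another thread already handled this fault, so we back out via out_nomap
 * without mapping our folio.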
3963 */ 3964 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 3965 &vmf->ptl); 3966 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 3967 goto out_nomap; 3968 3969 if (unlikely(!folio_test_uptodate(folio))) { 3970 ret = VM_FAULT_SIGBUS; 3971 goto out_nomap; 3972 } 3973 3974 /* 3975 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte 3976 * must never point at an anonymous page in the swapcache that is 3977 * PG_anon_exclusive. Sanity check that this holds and especially, that 3978 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity 3979 * check after taking the PT lock and making sure that nobody 3980 * concurrently faulted in this page and set PG_anon_exclusive. 3981 */ 3982 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio)); 3983 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page)); 3984 3985 /* 3986 * Check under PT lock (to protect against concurrent fork() sharing 3987 * the swap entry concurrently) for certainly exclusive pages. 3988 */ 3989 if (!folio_test_ksm(folio)) { 3990 exclusive = pte_swp_exclusive(vmf->orig_pte); 3991 if (folio != swapcache) { 3992 /* 3993 * We have a fresh page that is not exposed to the 3994 * swapcache -> certainly exclusive. 3995 */ 3996 exclusive = true; 3997 } else if (exclusive && folio_test_writeback(folio) && 3998 data_race(si->flags & SWP_STABLE_WRITES)) { 3999 /* 4000 * This is tricky: not all swap backends support 4001 * concurrent page modifications while under writeback. 4002 * 4003 * So if we stumble over such a page in the swapcache 4004 * we must not set the page exclusive, otherwise we can 4005 * map it writable without further checks and modify it 4006 * while still under writeback. 4007 * 4008 * For these problematic swap backends, simply drop the 4009 * exclusive marker: this is perfectly fine as we start 4010 * writeback only if we fully unmapped the page and 4011 * there are no unexpected references on the page after 4012 * unmapping succeeded. After fully unmapped, no 4013 * further GUP references (FOLL_GET and FOLL_PIN) can 4014 * appear, so dropping the exclusive marker and mapping 4015 * it only R/O is fine. 4016 */ 4017 exclusive = false; 4018 } 4019 } 4020 4021 /* 4022 * Some architectures may have to restore extra metadata to the page 4023 * when reading from swap. This metadata may be indexed by swap entry 4024 * so this must be called before swap_free(). 4025 */ 4026 arch_swap_restore(entry, folio); 4027 4028 /* 4029 * Remove the swap entry and conditionally try to free up the swapcache. 4030 * We're already holding a reference on the page but haven't mapped it 4031 * yet. 4032 */ 4033 swap_free(entry); 4034 if (should_try_to_free_swap(folio, vma, vmf->flags)) 4035 folio_free_swap(folio); 4036 4037 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); 4038 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); 4039 pte = mk_pte(page, vma->vm_page_prot); 4040 4041 /* 4042 * Same logic as in do_wp_page(); however, optimize for pages that are 4043 * certainly not shared either because we just allocated them without 4044 * exposing them to the swapcache or because the swap entry indicates 4045 * exclusivity. 
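 * If the check below succeeds, a write fault can map the pte writable and
 * dirty immediately (FAULT_FLAG_WRITE is cleared so do_wp_page() is not
 * called later), and RMAP_EXCLUSIVE tells the rmap code that the page is
 * exclusive to this process.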
4046 */ 4047 if (!folio_test_ksm(folio) && 4048 (exclusive || folio_ref_count(folio) == 1)) { 4049 if (vmf->flags & FAULT_FLAG_WRITE) { 4050 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 4051 vmf->flags &= ~FAULT_FLAG_WRITE; 4052 } 4053 rmap_flags |= RMAP_EXCLUSIVE; 4054 } 4055 flush_icache_page(vma, page); 4056 if (pte_swp_soft_dirty(vmf->orig_pte)) 4057 pte = pte_mksoft_dirty(pte); 4058 if (pte_swp_uffd_wp(vmf->orig_pte)) 4059 pte = pte_mkuffd_wp(pte); 4060 vmf->orig_pte = pte; 4061 4062 /* ksm created a completely new copy */ 4063 if (unlikely(folio != swapcache && swapcache)) { 4064 page_add_new_anon_rmap(page, vma, vmf->address); 4065 folio_add_lru_vma(folio, vma); 4066 } else { 4067 page_add_anon_rmap(page, vma, vmf->address, rmap_flags); 4068 } 4069 4070 VM_BUG_ON(!folio_test_anon(folio) || 4071 (pte_write(pte) && !PageAnonExclusive(page))); 4072 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 4073 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); 4074 4075 folio_unlock(folio); 4076 if (folio != swapcache && swapcache) { 4077 /* 4078 * Hold the lock to avoid the swap entry to be reused 4079 * until we take the PT lock for the pte_same() check 4080 * (to avoid false positives from pte_same). For 4081 * further safety release the lock after the swap_free 4082 * so that the swap count won't change under a 4083 * parallel locked swapcache. 4084 */ 4085 folio_unlock(swapcache); 4086 folio_put(swapcache); 4087 } 4088 4089 if (vmf->flags & FAULT_FLAG_WRITE) { 4090 ret |= do_wp_page(vmf); 4091 if (ret & VM_FAULT_ERROR) 4092 ret &= VM_FAULT_ERROR; 4093 goto out; 4094 } 4095 4096 /* No need to invalidate - it was non-present before */ 4097 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 4098 unlock: 4099 if (vmf->pte) 4100 pte_unmap_unlock(vmf->pte, vmf->ptl); 4101 out: 4102 if (si) 4103 put_swap_device(si); 4104 return ret; 4105 out_nomap: 4106 if (vmf->pte) 4107 pte_unmap_unlock(vmf->pte, vmf->ptl); 4108 out_page: 4109 folio_unlock(folio); 4110 out_release: 4111 folio_put(folio); 4112 if (folio != swapcache && swapcache) { 4113 folio_unlock(swapcache); 4114 folio_put(swapcache); 4115 } 4116 if (si) 4117 put_swap_device(si); 4118 return ret; 4119 } 4120 4121 /* 4122 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4123 * but allow concurrent faults), and pte mapped but not yet locked. 4124 * We return with mmap_lock still held, but pte unmapped and unlocked. 4125 */ 4126 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) 4127 { 4128 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); 4129 struct vm_area_struct *vma = vmf->vma; 4130 struct folio *folio; 4131 vm_fault_t ret = 0; 4132 pte_t entry; 4133 4134 /* File mapping without ->vm_ops ? */ 4135 if (vma->vm_flags & VM_SHARED) 4136 return VM_FAULT_SIGBUS; 4137 4138 /* 4139 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can 4140 * be distinguished from a transient failure of pte_offset_map(). 
4141 */ 4142 if (pte_alloc(vma->vm_mm, vmf->pmd)) 4143 return VM_FAULT_OOM; 4144 4145 /* Use the zero-page for reads */ 4146 if (!(vmf->flags & FAULT_FLAG_WRITE) && 4147 !mm_forbids_zeropage(vma->vm_mm)) { 4148 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), 4149 vma->vm_page_prot)); 4150 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4151 vmf->address, &vmf->ptl); 4152 if (!vmf->pte) 4153 goto unlock; 4154 if (vmf_pte_changed(vmf)) { 4155 update_mmu_tlb(vma, vmf->address, vmf->pte); 4156 goto unlock; 4157 } 4158 ret = check_stable_address_space(vma->vm_mm); 4159 if (ret) 4160 goto unlock; 4161 /* Deliver the page fault to userland, check inside PT lock */ 4162 if (userfaultfd_missing(vma)) { 4163 pte_unmap_unlock(vmf->pte, vmf->ptl); 4164 return handle_userfault(vmf, VM_UFFD_MISSING); 4165 } 4166 goto setpte; 4167 } 4168 4169 /* Allocate our own private page. */ 4170 if (unlikely(anon_vma_prepare(vma))) 4171 goto oom; 4172 folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); 4173 if (!folio) 4174 goto oom; 4175 4176 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) 4177 goto oom_free_page; 4178 folio_throttle_swaprate(folio, GFP_KERNEL); 4179 4180 /* 4181 * The memory barrier inside __folio_mark_uptodate makes sure that 4182 * preceding stores to the page contents become visible before 4183 * the set_pte_at() write. 4184 */ 4185 __folio_mark_uptodate(folio); 4186 4187 entry = mk_pte(&folio->page, vma->vm_page_prot); 4188 entry = pte_sw_mkyoung(entry); 4189 if (vma->vm_flags & VM_WRITE) 4190 entry = pte_mkwrite(pte_mkdirty(entry), vma); 4191 4192 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4193 &vmf->ptl); 4194 if (!vmf->pte) 4195 goto release; 4196 if (vmf_pte_changed(vmf)) { 4197 update_mmu_tlb(vma, vmf->address, vmf->pte); 4198 goto release; 4199 } 4200 4201 ret = check_stable_address_space(vma->vm_mm); 4202 if (ret) 4203 goto release; 4204 4205 /* Deliver the page fault to userland, check inside PT lock */ 4206 if (userfaultfd_missing(vma)) { 4207 pte_unmap_unlock(vmf->pte, vmf->ptl); 4208 folio_put(folio); 4209 return handle_userfault(vmf, VM_UFFD_MISSING); 4210 } 4211 4212 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); 4213 folio_add_new_anon_rmap(folio, vma, vmf->address); 4214 folio_add_lru_vma(folio, vma); 4215 setpte: 4216 if (uffd_wp) 4217 entry = pte_mkuffd_wp(entry); 4218 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); 4219 4220 /* No need to invalidate - it was non-present before */ 4221 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 4222 unlock: 4223 if (vmf->pte) 4224 pte_unmap_unlock(vmf->pte, vmf->ptl); 4225 return ret; 4226 release: 4227 folio_put(folio); 4228 goto unlock; 4229 oom_free_page: 4230 folio_put(folio); 4231 oom: 4232 return VM_FAULT_OOM; 4233 } 4234 4235 /* 4236 * The mmap_lock must have been held on entry, and may have been 4237 * released depending on flags and vma->vm_ops->fault() return value. 4238 * See filemap_fault() and __lock_page_retry(). 
4239 */
4240 static vm_fault_t __do_fault(struct vm_fault *vmf)
4241 {
4242 struct vm_area_struct *vma = vmf->vma;
4243 vm_fault_t ret;
4244
4245 /*
4246 * Preallocate pte before we take page_lock because this might lead to
4247 * deadlocks for memcg reclaim which waits for pages under writeback:
4248 * lock_page(A)
4249 * SetPageWriteback(A)
4250 * unlock_page(A)
4251 * lock_page(B)
4252 * lock_page(B)
4253 * pte_alloc_one
4254 * shrink_page_list
4255 * wait_on_page_writeback(A)
4256 * SetPageWriteback(B)
4257 * unlock_page(B)
4258 * # flush A, B to clear the writeback
4259 */
4260 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4261 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4262 if (!vmf->prealloc_pte)
4263 return VM_FAULT_OOM;
4264 }
4265
4266 ret = vma->vm_ops->fault(vmf);
4267 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4268 VM_FAULT_DONE_COW)))
4269 return ret;
4270
4271 if (unlikely(PageHWPoison(vmf->page))) {
4272 struct page *page = vmf->page;
4273 vm_fault_t poisonret = VM_FAULT_HWPOISON;
4274 if (ret & VM_FAULT_LOCKED) {
4275 if (page_mapped(page))
4276 unmap_mapping_pages(page_mapping(page),
4277 page->index, 1, false);
4278 /* Retry if a clean page was removed from the cache. */
4279 if (invalidate_inode_page(page))
4280 poisonret = VM_FAULT_NOPAGE;
4281 unlock_page(page);
4282 }
4283 put_page(page);
4284 vmf->page = NULL;
4285 return poisonret;
4286 }
4287
4288 if (unlikely(!(ret & VM_FAULT_LOCKED)))
4289 lock_page(vmf->page);
4290 else
4291 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4292
4293 return ret;
4294 }
4295
4296 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4297 static void deposit_prealloc_pte(struct vm_fault *vmf)
4298 {
4299 struct vm_area_struct *vma = vmf->vma;
4300
4301 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4302 /*
4303 * We are going to consume the prealloc table,
4304 * count that as nr_ptes.
4305 */
4306 mm_inc_nr_ptes(vma->vm_mm);
4307 vmf->prealloc_pte = NULL;
4308 }
4309
4310 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4311 {
4312 struct vm_area_struct *vma = vmf->vma;
4313 bool write = vmf->flags & FAULT_FLAG_WRITE;
4314 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4315 pmd_t entry;
4316 vm_fault_t ret = VM_FAULT_FALLBACK;
4317
4318 if (!transhuge_vma_suitable(vma, haddr))
4319 return ret;
4320
4321 page = compound_head(page);
4322 if (compound_order(page) != HPAGE_PMD_ORDER)
4323 return ret;
4324
4325 /*
4326 * Just back off if any subpage of a THP is corrupted, otherwise
4327 * the corrupted page may be mapped by PMD silently to escape the
4328 * check. This kind of THP can only be PTE mapped. Access to
4329 * the corrupted subpage should trigger SIGBUS as expected.
4330 */
4331 if (unlikely(PageHasHWPoisoned(page)))
4332 return ret;
4333
4334 /*
4335 * Archs like ppc64 need additional space to store information
4336 * related to pte entry. Use the preallocated table for that.
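 * The table allocated below is handed over by deposit_prealloc_pte()
 * while the pmd lock is held, and is withdrawn again when the huge pmd
 * is later split or zapped.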
4337 */
4338 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4339 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4340 if (!vmf->prealloc_pte)
4341 return VM_FAULT_OOM;
4342 }
4343
4344 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4345 if (unlikely(!pmd_none(*vmf->pmd)))
4346 goto out;
4347
4348 flush_icache_pages(vma, page, HPAGE_PMD_NR);
4349
4350 entry = mk_huge_pmd(page, vma->vm_page_prot);
4351 if (write)
4352 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4353
4354 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
4355 page_add_file_rmap(page, vma, true);
4356
4357 /*
4358 * deposit and withdraw with pmd lock held
4359 */
4360 if (arch_needs_pgtable_deposit())
4361 deposit_prealloc_pte(vmf);
4362
4363 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4364
4365 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4366
4367 /* fault is handled */
4368 ret = 0;
4369 count_vm_event(THP_FILE_MAPPED);
4370 out:
4371 spin_unlock(vmf->ptl);
4372 return ret;
4373 }
4374 #else
4375 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4376 {
4377 return VM_FAULT_FALLBACK;
4378 }
4379 #endif
4380
4381 /**
4382 * set_pte_range - Set a range of PTEs to point to pages in a folio.
4383 * @vmf: Fault description.
4384 * @folio: The folio that contains @page.
4385 * @page: The first page to create a PTE for.
4386 * @nr: The number of PTEs to create.
4387 * @addr: The first address to create a PTE for.
4388 */
4389 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
4390 struct page *page, unsigned int nr, unsigned long addr)
4391 {
4392 struct vm_area_struct *vma = vmf->vma;
4393 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
4394 bool write = vmf->flags & FAULT_FLAG_WRITE;
4395 bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
4396 pte_t entry;
4397
4398 flush_icache_pages(vma, page, nr);
4399 entry = mk_pte(page, vma->vm_page_prot);
4400
4401 if (prefault && arch_wants_old_prefaulted_pte())
4402 entry = pte_mkold(entry);
4403 else
4404 entry = pte_sw_mkyoung(entry);
4405
4406 if (write)
4407 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4408 if (unlikely(uffd_wp))
4409 entry = pte_mkuffd_wp(entry);
4410 /* copy-on-write page */
4411 if (write && !(vma->vm_flags & VM_SHARED)) {
4412 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
4413 VM_BUG_ON_FOLIO(nr != 1, folio);
4414 folio_add_new_anon_rmap(folio, vma, addr);
4415 folio_add_lru_vma(folio, vma);
4416 } else {
4417 add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
4418 folio_add_file_rmap_range(folio, page, nr, vma, false);
4419 }
4420 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
4421
4422 /* no need to invalidate: a not-present page won't be cached */
4423 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
4424 }
4425
4426 static bool vmf_pte_changed(struct vm_fault *vmf)
4427 {
4428 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
4429 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
4430
4431 return !pte_none(ptep_get(vmf->pte));
4432 }
4433
4434 /**
4435 * finish_fault - finish page fault once we have prepared the page to fault
4436 *
4437 * @vmf: structure describing the fault
4438 *
4439 * This function handles all that is needed to finish a page fault once the
4440 * page to fault in is prepared. It handles locking of PTEs, inserts the PTE
4441 * for the given page, adds reverse page mapping, handles memcg charges and
4442 * LRU addition.
4443 *
4444 * The function expects the page to be locked and on success it consumes a
4445 * reference on the page being mapped (for the PTE which maps it).
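 *
 * A condensed sketch of how the do_*_fault() helpers below use it (error
 * handling omitted; see do_read_fault() for the real sequence):
 *
 *	ret = __do_fault(vmf);
 *	ret |= finish_fault(vmf);
 *	folio_unlock(page_folio(vmf->page));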
4446 * 4447 * Return: %0 on success, %VM_FAULT_ code in case of error. 4448 */ 4449 vm_fault_t finish_fault(struct vm_fault *vmf) 4450 { 4451 struct vm_area_struct *vma = vmf->vma; 4452 struct page *page; 4453 vm_fault_t ret; 4454 4455 /* Did we COW the page? */ 4456 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 4457 page = vmf->cow_page; 4458 else 4459 page = vmf->page; 4460 4461 /* 4462 * check even for read faults because we might have lost our CoWed 4463 * page 4464 */ 4465 if (!(vma->vm_flags & VM_SHARED)) { 4466 ret = check_stable_address_space(vma->vm_mm); 4467 if (ret) 4468 return ret; 4469 } 4470 4471 if (pmd_none(*vmf->pmd)) { 4472 if (PageTransCompound(page)) { 4473 ret = do_set_pmd(vmf, page); 4474 if (ret != VM_FAULT_FALLBACK) 4475 return ret; 4476 } 4477 4478 if (vmf->prealloc_pte) 4479 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); 4480 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) 4481 return VM_FAULT_OOM; 4482 } 4483 4484 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4485 vmf->address, &vmf->ptl); 4486 if (!vmf->pte) 4487 return VM_FAULT_NOPAGE; 4488 4489 /* Re-check under ptl */ 4490 if (likely(!vmf_pte_changed(vmf))) { 4491 struct folio *folio = page_folio(page); 4492 4493 set_pte_range(vmf, folio, page, 1, vmf->address); 4494 ret = 0; 4495 } else { 4496 update_mmu_tlb(vma, vmf->address, vmf->pte); 4497 ret = VM_FAULT_NOPAGE; 4498 } 4499 4500 pte_unmap_unlock(vmf->pte, vmf->ptl); 4501 return ret; 4502 } 4503 4504 static unsigned long fault_around_pages __read_mostly = 4505 65536 >> PAGE_SHIFT; 4506 4507 #ifdef CONFIG_DEBUG_FS 4508 static int fault_around_bytes_get(void *data, u64 *val) 4509 { 4510 *val = fault_around_pages << PAGE_SHIFT; 4511 return 0; 4512 } 4513 4514 /* 4515 * fault_around_bytes must be rounded down to the nearest page order as it's 4516 * what do_fault_around() expects to see. 4517 */ 4518 static int fault_around_bytes_set(void *data, u64 val) 4519 { 4520 if (val / PAGE_SIZE > PTRS_PER_PTE) 4521 return -EINVAL; 4522 4523 /* 4524 * The minimum value is 1 page, however this results in no fault-around 4525 * at all. See should_fault_around(). 4526 */ 4527 fault_around_pages = max(rounddown_pow_of_two(val) >> PAGE_SHIFT, 1UL); 4528 4529 return 0; 4530 } 4531 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, 4532 fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); 4533 4534 static int __init fault_around_debugfs(void) 4535 { 4536 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, 4537 &fault_around_bytes_fops); 4538 return 0; 4539 } 4540 late_initcall(fault_around_debugfs); 4541 #endif 4542 4543 /* 4544 * do_fault_around() tries to map few pages around the fault address. The hope 4545 * is that the pages will be needed soon and this will lower the number of 4546 * faults to handle. 4547 * 4548 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's 4549 * not ready to be mapped: not up-to-date, locked, etc. 4550 * 4551 * This function doesn't cross VMA or page table boundaries, in order to call 4552 * map_pages() and acquire a PTE lock only once. 4553 * 4554 * fault_around_pages defines how many pages we'll try to map. 4555 * do_fault_around() expects it to be set to a power of two less than or equal 4556 * to PTRS_PER_PTE. 4557 * 4558 * The virtual address of the area that we map is naturally aligned to 4559 * fault_around_pages * PAGE_SIZE rounded down to the machine page size 4560 * (and therefore to page order). 
This way it's easier to guarantee 4561 * that we don't cross page table boundaries. 4562 */ 4563 static vm_fault_t do_fault_around(struct vm_fault *vmf) 4564 { 4565 pgoff_t nr_pages = READ_ONCE(fault_around_pages); 4566 pgoff_t pte_off = pte_index(vmf->address); 4567 /* The page offset of vmf->address within the VMA. */ 4568 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; 4569 pgoff_t from_pte, to_pte; 4570 vm_fault_t ret; 4571 4572 /* The PTE offset of the start address, clamped to the VMA. */ 4573 from_pte = max(ALIGN_DOWN(pte_off, nr_pages), 4574 pte_off - min(pte_off, vma_off)); 4575 4576 /* The PTE offset of the end address, clamped to the VMA and PTE. */ 4577 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE, 4578 pte_off + vma_pages(vmf->vma) - vma_off) - 1; 4579 4580 if (pmd_none(*vmf->pmd)) { 4581 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); 4582 if (!vmf->prealloc_pte) 4583 return VM_FAULT_OOM; 4584 } 4585 4586 rcu_read_lock(); 4587 ret = vmf->vma->vm_ops->map_pages(vmf, 4588 vmf->pgoff + from_pte - pte_off, 4589 vmf->pgoff + to_pte - pte_off); 4590 rcu_read_unlock(); 4591 4592 return ret; 4593 } 4594 4595 /* Return true if we should do read fault-around, false otherwise */ 4596 static inline bool should_fault_around(struct vm_fault *vmf) 4597 { 4598 /* No ->map_pages? No way to fault around... */ 4599 if (!vmf->vma->vm_ops->map_pages) 4600 return false; 4601 4602 if (uffd_disable_fault_around(vmf->vma)) 4603 return false; 4604 4605 /* A single page implies no faulting 'around' at all. */ 4606 return fault_around_pages > 1; 4607 } 4608 4609 static vm_fault_t do_read_fault(struct vm_fault *vmf) 4610 { 4611 vm_fault_t ret = 0; 4612 struct folio *folio; 4613 4614 /* 4615 * Let's call ->map_pages() first and use ->fault() as fallback 4616 * if page by the offset is not ready to be mapped (cold cache or 4617 * something). 
4618 */ 4619 if (should_fault_around(vmf)) { 4620 ret = do_fault_around(vmf); 4621 if (ret) 4622 return ret; 4623 } 4624 4625 ret = vmf_can_call_fault(vmf); 4626 if (ret) 4627 return ret; 4628 4629 ret = __do_fault(vmf); 4630 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4631 return ret; 4632 4633 ret |= finish_fault(vmf); 4634 folio = page_folio(vmf->page); 4635 folio_unlock(folio); 4636 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4637 folio_put(folio); 4638 return ret; 4639 } 4640 4641 static vm_fault_t do_cow_fault(struct vm_fault *vmf) 4642 { 4643 struct vm_area_struct *vma = vmf->vma; 4644 vm_fault_t ret; 4645 4646 ret = vmf_can_call_fault(vmf); 4647 if (!ret) 4648 ret = vmf_anon_prepare(vmf); 4649 if (ret) 4650 return ret; 4651 4652 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); 4653 if (!vmf->cow_page) 4654 return VM_FAULT_OOM; 4655 4656 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, 4657 GFP_KERNEL)) { 4658 put_page(vmf->cow_page); 4659 return VM_FAULT_OOM; 4660 } 4661 folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL); 4662 4663 ret = __do_fault(vmf); 4664 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4665 goto uncharge_out; 4666 if (ret & VM_FAULT_DONE_COW) 4667 return ret; 4668 4669 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); 4670 __SetPageUptodate(vmf->cow_page); 4671 4672 ret |= finish_fault(vmf); 4673 unlock_page(vmf->page); 4674 put_page(vmf->page); 4675 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4676 goto uncharge_out; 4677 return ret; 4678 uncharge_out: 4679 put_page(vmf->cow_page); 4680 return ret; 4681 } 4682 4683 static vm_fault_t do_shared_fault(struct vm_fault *vmf) 4684 { 4685 struct vm_area_struct *vma = vmf->vma; 4686 vm_fault_t ret, tmp; 4687 struct folio *folio; 4688 4689 ret = vmf_can_call_fault(vmf); 4690 if (ret) 4691 return ret; 4692 4693 ret = __do_fault(vmf); 4694 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4695 return ret; 4696 4697 folio = page_folio(vmf->page); 4698 4699 /* 4700 * Check if the backing address space wants to know that the page is 4701 * about to become writable 4702 */ 4703 if (vma->vm_ops->page_mkwrite) { 4704 folio_unlock(folio); 4705 tmp = do_page_mkwrite(vmf, folio); 4706 if (unlikely(!tmp || 4707 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 4708 folio_put(folio); 4709 return tmp; 4710 } 4711 } 4712 4713 ret |= finish_fault(vmf); 4714 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | 4715 VM_FAULT_RETRY))) { 4716 folio_unlock(folio); 4717 folio_put(folio); 4718 return ret; 4719 } 4720 4721 ret |= fault_dirty_shared_page(vmf); 4722 return ret; 4723 } 4724 4725 /* 4726 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4727 * but allow concurrent faults). 4728 * The mmap_lock may have been released depending on flags and our 4729 * return value. See filemap_fault() and __folio_lock_or_retry(). 4730 * If mmap_lock is released, vma may become invalid (for example 4731 * by other thread calling munmap()). 
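 * That is why do_fault() below caches vma->vm_mm in a local variable and
 * frees the unused preallocated page table through that copy rather than
 * through the vma.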
4732 */
4733 static vm_fault_t do_fault(struct vm_fault *vmf)
4734 {
4735 struct vm_area_struct *vma = vmf->vma;
4736 struct mm_struct *vm_mm = vma->vm_mm;
4737 vm_fault_t ret;
4738
4739 /*
4740 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4741 */
4742 if (!vma->vm_ops->fault) {
4743 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4744 vmf->address, &vmf->ptl);
4745 if (unlikely(!vmf->pte))
4746 ret = VM_FAULT_SIGBUS;
4747 else {
4748 /*
4749 * Make sure this is not a temporary clearing of pte
4750 * by holding ptl and checking again. A R/M/W update
4751 * of the pte involves: taking the ptl, clearing the pte
4752 * so that we don't have concurrent modification by
4753 * hardware, followed by an update.
4754 */
4755 if (unlikely(pte_none(ptep_get(vmf->pte))))
4756 ret = VM_FAULT_SIGBUS;
4757 else
4758 ret = VM_FAULT_NOPAGE;
4759
4760 pte_unmap_unlock(vmf->pte, vmf->ptl);
4761 }
4762 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
4763 ret = do_read_fault(vmf);
4764 else if (!(vma->vm_flags & VM_SHARED))
4765 ret = do_cow_fault(vmf);
4766 else
4767 ret = do_shared_fault(vmf);
4768
4769 /* preallocated pagetable is unused: free it */
4770 if (vmf->prealloc_pte) {
4771 pte_free(vm_mm, vmf->prealloc_pte);
4772 vmf->prealloc_pte = NULL;
4773 }
4774 return ret;
4775 }
4776
4777 int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
4778 unsigned long addr, int page_nid, int *flags)
4779 {
4780 folio_get(folio);
4781
4782 /* Record the current PID accessing the VMA */
4783 vma_set_access_pid_bit(vma);
4784
4785 count_vm_numa_event(NUMA_HINT_FAULTS);
4786 if (page_nid == numa_node_id()) {
4787 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4788 *flags |= TNF_FAULT_LOCAL;
4789 }
4790
4791 return mpol_misplaced(folio, vma, addr);
4792 }
4793
4794 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4795 {
4796 struct vm_area_struct *vma = vmf->vma;
4797 struct folio *folio = NULL;
4798 int nid = NUMA_NO_NODE;
4799 bool writable = false;
4800 int last_cpupid;
4801 int target_nid;
4802 pte_t pte, old_pte;
4803 int flags = 0;
4804
4805 /*
4806 * The "pte" at this point cannot be used safely without
4807 * validation through pte_unmap_same(). It's of NUMA type but
4808 * the pfn may be screwed if the read is non-atomic.
4809 */
4810 spin_lock(vmf->ptl);
4811 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
4812 pte_unmap_unlock(vmf->pte, vmf->ptl);
4813 goto out;
4814 }
4815
4816 /* Get the normal PTE */
4817 old_pte = ptep_get(vmf->pte);
4818 pte = pte_modify(old_pte, vma->vm_page_prot);
4819
4820 /*
4821 * Detect now whether the PTE could be writable; this information
4822 * is only valid while holding the PT lock.
4823 */
4824 writable = pte_write(pte);
4825 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
4826 can_change_pte_writable(vma, vmf->address, pte))
4827 writable = true;
4828
4829 folio = vm_normal_folio(vma, vmf->address, pte);
4830 if (!folio || folio_is_zone_device(folio))
4831 goto out_map;
4832
4833 /* TODO: handle PTE-mapped THP */
4834 if (folio_test_large(folio))
4835 goto out_map;
4836
4837 /*
4838 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4839 * much anyway since they can be in shared cache state. This misses
4840 * the case where a mapping is writable but the process never writes
4841 * to it but pte_write gets cleared during protection updates and
4842 * pte_dirty has unpredictable behaviour between PTE scan updates,
4843 * background writeback, dirty balancing and application behaviour.
4844 */ 4845 if (!writable) 4846 flags |= TNF_NO_GROUP; 4847 4848 /* 4849 * Flag if the folio is shared between multiple address spaces. This 4850 * is later used when determining whether to group tasks together 4851 */ 4852 if (folio_estimated_sharers(folio) > 1 && (vma->vm_flags & VM_SHARED)) 4853 flags |= TNF_SHARED; 4854 4855 nid = folio_nid(folio); 4856 /* 4857 * For memory tiering mode, cpupid of slow memory page is used 4858 * to record page access time. So use default value. 4859 */ 4860 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 4861 !node_is_toptier(nid)) 4862 last_cpupid = (-1 & LAST_CPUPID_MASK); 4863 else 4864 last_cpupid = folio_last_cpupid(folio); 4865 target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags); 4866 if (target_nid == NUMA_NO_NODE) { 4867 folio_put(folio); 4868 goto out_map; 4869 } 4870 pte_unmap_unlock(vmf->pte, vmf->ptl); 4871 writable = false; 4872 4873 /* Migrate to the requested node */ 4874 if (migrate_misplaced_folio(folio, vma, target_nid)) { 4875 nid = target_nid; 4876 flags |= TNF_MIGRATED; 4877 } else { 4878 flags |= TNF_MIGRATE_FAIL; 4879 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4880 vmf->address, &vmf->ptl); 4881 if (unlikely(!vmf->pte)) 4882 goto out; 4883 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 4884 pte_unmap_unlock(vmf->pte, vmf->ptl); 4885 goto out; 4886 } 4887 goto out_map; 4888 } 4889 4890 out: 4891 if (nid != NUMA_NO_NODE) 4892 task_numa_fault(last_cpupid, nid, 1, flags); 4893 return 0; 4894 out_map: 4895 /* 4896 * Make it present again, depending on how arch implements 4897 * non-accessible ptes, some can allow access by kernel mode. 4898 */ 4899 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); 4900 pte = pte_modify(old_pte, vma->vm_page_prot); 4901 pte = pte_mkyoung(pte); 4902 if (writable) 4903 pte = pte_mkwrite(pte, vma); 4904 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); 4905 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 4906 pte_unmap_unlock(vmf->pte, vmf->ptl); 4907 goto out; 4908 } 4909 4910 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) 4911 { 4912 struct vm_area_struct *vma = vmf->vma; 4913 if (vma_is_anonymous(vma)) 4914 return do_huge_pmd_anonymous_page(vmf); 4915 if (vma->vm_ops->huge_fault) 4916 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); 4917 return VM_FAULT_FALLBACK; 4918 } 4919 4920 /* `inline' is required to avoid gcc 4.1.2 build error */ 4921 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) 4922 { 4923 struct vm_area_struct *vma = vmf->vma; 4924 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 4925 vm_fault_t ret; 4926 4927 if (vma_is_anonymous(vma)) { 4928 if (likely(!unshare) && 4929 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { 4930 if (userfaultfd_wp_async(vmf->vma)) 4931 goto split; 4932 return handle_userfault(vmf, VM_UFFD_WP); 4933 } 4934 return do_huge_pmd_wp_page(vmf); 4935 } 4936 4937 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 4938 if (vma->vm_ops->huge_fault) { 4939 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); 4940 if (!(ret & VM_FAULT_FALLBACK)) 4941 return ret; 4942 } 4943 } 4944 4945 split: 4946 /* COW or write-notify handled on pte level: split pmd. 
*/ 4947 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 4948 4949 return VM_FAULT_FALLBACK; 4950 } 4951 4952 static vm_fault_t create_huge_pud(struct vm_fault *vmf) 4953 { 4954 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 4955 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 4956 struct vm_area_struct *vma = vmf->vma; 4957 /* No support for anonymous transparent PUD pages yet */ 4958 if (vma_is_anonymous(vma)) 4959 return VM_FAULT_FALLBACK; 4960 if (vma->vm_ops->huge_fault) 4961 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); 4962 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 4963 return VM_FAULT_FALLBACK; 4964 } 4965 4966 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) 4967 { 4968 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 4969 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 4970 struct vm_area_struct *vma = vmf->vma; 4971 vm_fault_t ret; 4972 4973 /* No support for anonymous transparent PUD pages yet */ 4974 if (vma_is_anonymous(vma)) 4975 goto split; 4976 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 4977 if (vma->vm_ops->huge_fault) { 4978 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); 4979 if (!(ret & VM_FAULT_FALLBACK)) 4980 return ret; 4981 } 4982 } 4983 split: 4984 /* COW or write-notify not handled on PUD level: split pud.*/ 4985 __split_huge_pud(vma, vmf->pud, vmf->address); 4986 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 4987 return VM_FAULT_FALLBACK; 4988 } 4989 4990 /* 4991 * These routines also need to handle stuff like marking pages dirty 4992 * and/or accessed for architectures that don't do it in hardware (most 4993 * RISC architectures). The early dirtying is also good on the i386. 4994 * 4995 * There is also a hook called "update_mmu_cache()" that architectures 4996 * with external mmu caches can use to update those (ie the Sparc or 4997 * PowerPC hashed page tables that act as extended TLBs). 4998 * 4999 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow 5000 * concurrent faults). 5001 * 5002 * The mmap_lock may have been released depending on flags and our return value. 5003 * See filemap_fault() and __folio_lock_or_retry(). 5004 */ 5005 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) 5006 { 5007 pte_t entry; 5008 5009 if (unlikely(pmd_none(*vmf->pmd))) { 5010 /* 5011 * Leave __pte_alloc() until later: because vm_ops->fault may 5012 * want to allocate huge page, and if we expose page table 5013 * for an instant, it will be difficult to retract from 5014 * concurrent faults and from rmap lookups. 5015 */ 5016 vmf->pte = NULL; 5017 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; 5018 } else { 5019 /* 5020 * A regular pmd is established and it can't morph into a huge 5021 * pmd by anon khugepaged, since that takes mmap_lock in write 5022 * mode; but shmem or file collapse to THP could still morph 5023 * it into a huge pmd: just retry later if so. 
5024 */ 5025 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, 5026 vmf->address, &vmf->ptl); 5027 if (unlikely(!vmf->pte)) 5028 return 0; 5029 vmf->orig_pte = ptep_get_lockless(vmf->pte); 5030 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; 5031 5032 if (pte_none(vmf->orig_pte)) { 5033 pte_unmap(vmf->pte); 5034 vmf->pte = NULL; 5035 } 5036 } 5037 5038 if (!vmf->pte) 5039 return do_pte_missing(vmf); 5040 5041 if (!pte_present(vmf->orig_pte)) 5042 return do_swap_page(vmf); 5043 5044 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) 5045 return do_numa_page(vmf); 5046 5047 spin_lock(vmf->ptl); 5048 entry = vmf->orig_pte; 5049 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { 5050 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 5051 goto unlock; 5052 } 5053 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 5054 if (!pte_write(entry)) 5055 return do_wp_page(vmf); 5056 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) 5057 entry = pte_mkdirty(entry); 5058 } 5059 entry = pte_mkyoung(entry); 5060 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, 5061 vmf->flags & FAULT_FLAG_WRITE)) { 5062 update_mmu_cache_range(vmf, vmf->vma, vmf->address, 5063 vmf->pte, 1); 5064 } else { 5065 /* Skip spurious TLB flush for retried page fault */ 5066 if (vmf->flags & FAULT_FLAG_TRIED) 5067 goto unlock; 5068 /* 5069 * This is needed only for protection faults but the arch code 5070 * is not yet telling us if this is a protection fault or not. 5071 * This still avoids useless tlb flushes for .text page faults 5072 * with threads. 5073 */ 5074 if (vmf->flags & FAULT_FLAG_WRITE) 5075 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, 5076 vmf->pte); 5077 } 5078 unlock: 5079 pte_unmap_unlock(vmf->pte, vmf->ptl); 5080 return 0; 5081 } 5082 5083 /* 5084 * On entry, we hold either the VMA lock or the mmap_lock 5085 * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in 5086 * the result, the mmap_lock is not held on exit. See filemap_fault() 5087 * and __folio_lock_or_retry(). 5088 */ 5089 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, 5090 unsigned long address, unsigned int flags) 5091 { 5092 struct vm_fault vmf = { 5093 .vma = vma, 5094 .address = address & PAGE_MASK, 5095 .real_address = address, 5096 .flags = flags, 5097 .pgoff = linear_page_index(vma, address), 5098 .gfp_mask = __get_fault_gfp_mask(vma), 5099 }; 5100 struct mm_struct *mm = vma->vm_mm; 5101 unsigned long vm_flags = vma->vm_flags; 5102 pgd_t *pgd; 5103 p4d_t *p4d; 5104 vm_fault_t ret; 5105 5106 pgd = pgd_offset(mm, address); 5107 p4d = p4d_alloc(mm, pgd, address); 5108 if (!p4d) 5109 return VM_FAULT_OOM; 5110 5111 vmf.pud = pud_alloc(mm, p4d, address); 5112 if (!vmf.pud) 5113 return VM_FAULT_OOM; 5114 retry_pud: 5115 if (pud_none(*vmf.pud) && 5116 hugepage_vma_check(vma, vm_flags, false, true, true)) { 5117 ret = create_huge_pud(&vmf); 5118 if (!(ret & VM_FAULT_FALLBACK)) 5119 return ret; 5120 } else { 5121 pud_t orig_pud = *vmf.pud; 5122 5123 barrier(); 5124 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { 5125 5126 /* 5127 * TODO once we support anonymous PUDs: NUMA case and 5128 * FAULT_FLAG_UNSHARE handling. 
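 * For now, only a write fault on a huge PUD that is not already writable
 * reaches wp_huge_pud(); anything else just marks the PUD accessed and
 * returns.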
5129 */ 5130 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { 5131 ret = wp_huge_pud(&vmf, orig_pud); 5132 if (!(ret & VM_FAULT_FALLBACK)) 5133 return ret; 5134 } else { 5135 huge_pud_set_accessed(&vmf, orig_pud); 5136 return 0; 5137 } 5138 } 5139 } 5140 5141 vmf.pmd = pmd_alloc(mm, vmf.pud, address); 5142 if (!vmf.pmd) 5143 return VM_FAULT_OOM; 5144 5145 /* Huge pud page fault raced with pmd_alloc? */ 5146 if (pud_trans_unstable(vmf.pud)) 5147 goto retry_pud; 5148 5149 if (pmd_none(*vmf.pmd) && 5150 hugepage_vma_check(vma, vm_flags, false, true, true)) { 5151 ret = create_huge_pmd(&vmf); 5152 if (!(ret & VM_FAULT_FALLBACK)) 5153 return ret; 5154 } else { 5155 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); 5156 5157 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { 5158 VM_BUG_ON(thp_migration_supported() && 5159 !is_pmd_migration_entry(vmf.orig_pmd)); 5160 if (is_pmd_migration_entry(vmf.orig_pmd)) 5161 pmd_migration_entry_wait(mm, vmf.pmd); 5162 return 0; 5163 } 5164 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { 5165 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) 5166 return do_huge_pmd_numa_page(&vmf); 5167 5168 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 5169 !pmd_write(vmf.orig_pmd)) { 5170 ret = wp_huge_pmd(&vmf); 5171 if (!(ret & VM_FAULT_FALLBACK)) 5172 return ret; 5173 } else { 5174 huge_pmd_set_accessed(&vmf); 5175 return 0; 5176 } 5177 } 5178 } 5179 5180 return handle_pte_fault(&vmf); 5181 } 5182 5183 /** 5184 * mm_account_fault - Do page fault accounting 5185 * @mm: mm from which memcg should be extracted. It can be NULL. 5186 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting 5187 * of perf event counters, but we'll still do the per-task accounting to 5188 * the task who triggered this page fault. 5189 * @address: the faulted address. 5190 * @flags: the fault flags. 5191 * @ret: the fault retcode. 5192 * 5193 * This will take care of most of the page fault accounting. Meanwhile, it 5194 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter 5195 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should 5196 * still be in per-arch page fault handlers at the entry of page fault. 5197 */ 5198 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, 5199 unsigned long address, unsigned int flags, 5200 vm_fault_t ret) 5201 { 5202 bool major; 5203 5204 /* Incomplete faults will be accounted upon completion. */ 5205 if (ret & VM_FAULT_RETRY) 5206 return; 5207 5208 /* 5209 * To preserve the behavior of older kernels, PGFAULT counters record 5210 * both successful and failed faults, as opposed to perf counters, 5211 * which ignore failed cases. 5212 */ 5213 count_vm_event(PGFAULT); 5214 count_memcg_event_mm(mm, PGFAULT); 5215 5216 /* 5217 * Do not account for unsuccessful faults (e.g. when the address wasn't 5218 * valid). That includes arch_vma_access_permitted() failing before 5219 * reaching here. So this is not a "this many hardware page faults" 5220 * counter. We should use the hw profiling for that. 5221 */ 5222 if (ret & VM_FAULT_ERROR) 5223 return; 5224 5225 /* 5226 * We define the fault as a major fault when the final successful fault 5227 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't 5228 * handle it immediately previously). 
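 * For example, a swap-in that had to read the page from the swap device
 * returns VM_FAULT_MAJOR (see do_swap_page()), and a fault that previously
 * returned VM_FAULT_RETRY comes back with FAULT_FLAG_TRIED set; both count
 * as major here.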
5229 */ 5230 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); 5231 5232 if (major) 5233 current->maj_flt++; 5234 else 5235 current->min_flt++; 5236 5237 /* 5238 * If the fault is done for GUP, regs will be NULL. We only do the 5239 * accounting for the per thread fault counters who triggered the 5240 * fault, and we skip the perf event updates. 5241 */ 5242 if (!regs) 5243 return; 5244 5245 if (major) 5246 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); 5247 else 5248 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 5249 } 5250 5251 #ifdef CONFIG_LRU_GEN 5252 static void lru_gen_enter_fault(struct vm_area_struct *vma) 5253 { 5254 /* the LRU algorithm only applies to accesses with recency */ 5255 current->in_lru_fault = vma_has_recency(vma); 5256 } 5257 5258 static void lru_gen_exit_fault(void) 5259 { 5260 current->in_lru_fault = false; 5261 } 5262 #else 5263 static void lru_gen_enter_fault(struct vm_area_struct *vma) 5264 { 5265 } 5266 5267 static void lru_gen_exit_fault(void) 5268 { 5269 } 5270 #endif /* CONFIG_LRU_GEN */ 5271 5272 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, 5273 unsigned int *flags) 5274 { 5275 if (unlikely(*flags & FAULT_FLAG_UNSHARE)) { 5276 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE)) 5277 return VM_FAULT_SIGSEGV; 5278 /* 5279 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's 5280 * just treat it like an ordinary read-fault otherwise. 5281 */ 5282 if (!is_cow_mapping(vma->vm_flags)) 5283 *flags &= ~FAULT_FLAG_UNSHARE; 5284 } else if (*flags & FAULT_FLAG_WRITE) { 5285 /* Write faults on read-only mappings are impossible ... */ 5286 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) 5287 return VM_FAULT_SIGSEGV; 5288 /* ... and FOLL_FORCE only applies to COW mappings. */ 5289 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && 5290 !is_cow_mapping(vma->vm_flags))) 5291 return VM_FAULT_SIGSEGV; 5292 } 5293 #ifdef CONFIG_PER_VMA_LOCK 5294 /* 5295 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of 5296 * the assumption that lock is dropped on VM_FAULT_RETRY. 5297 */ 5298 if (WARN_ON_ONCE((*flags & 5299 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) == 5300 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT))) 5301 return VM_FAULT_SIGSEGV; 5302 #endif 5303 5304 return 0; 5305 } 5306 5307 /* 5308 * By the time we get here, we already hold the mm semaphore 5309 * 5310 * The mmap_lock may have been released depending on flags and our 5311 * return value. See filemap_fault() and __folio_lock_or_retry(). 5312 */ 5313 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 5314 unsigned int flags, struct pt_regs *regs) 5315 { 5316 /* If the fault handler drops the mmap_lock, vma may be freed */ 5317 struct mm_struct *mm = vma->vm_mm; 5318 vm_fault_t ret; 5319 5320 __set_current_state(TASK_RUNNING); 5321 5322 ret = sanitize_fault_flags(vma, &flags); 5323 if (ret) 5324 goto out; 5325 5326 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, 5327 flags & FAULT_FLAG_INSTRUCTION, 5328 flags & FAULT_FLAG_REMOTE)) { 5329 ret = VM_FAULT_SIGSEGV; 5330 goto out; 5331 } 5332 5333 /* 5334 * Enable the memcg OOM handling for faults triggered in user 5335 * space. Kernel faults are handled more gracefully. 
5336 */
5337 if (flags & FAULT_FLAG_USER)
5338 mem_cgroup_enter_user_fault();
5339
5340 lru_gen_enter_fault(vma);
5341
5342 if (unlikely(is_vm_hugetlb_page(vma)))
5343 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5344 else
5345 ret = __handle_mm_fault(vma, address, flags);
5346
5347 lru_gen_exit_fault();
5348
5349 if (flags & FAULT_FLAG_USER) {
5350 mem_cgroup_exit_user_fault();
5351 /*
5352 * The task may have entered a memcg OOM situation but
5353 * if the allocation error was handled gracefully (no
5354 * VM_FAULT_OOM), there is no need to kill anything.
5355 * Just clean up the OOM state peacefully.
5356 */
5357 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5358 mem_cgroup_oom_synchronize(false);
5359 }
5360 out:
5361 mm_account_fault(mm, regs, address, flags, ret);
5362
5363 return ret;
5364 }
5365 EXPORT_SYMBOL_GPL(handle_mm_fault);
5366
5367 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA
5368 #include <linux/extable.h>
5369
5370 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5371 {
5372 if (likely(mmap_read_trylock(mm)))
5373 return true;
5374
5375 if (regs && !user_mode(regs)) {
5376 unsigned long ip = instruction_pointer(regs);
5377 if (!search_exception_tables(ip))
5378 return false;
5379 }
5380
5381 return !mmap_read_lock_killable(mm);
5382 }
5383
5384 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
5385 {
5386 /*
5387 * We don't have this operation yet.
5388 *
5389 * It should be easy enough to do: it's basically an
5390 * atomic_long_try_cmpxchg_acquire()
5391 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
5392 * it also needs the proper lockdep magic etc.
5393 */
5394 return false;
5395 }
5396
5397 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5398 {
5399 mmap_read_unlock(mm);
5400 if (regs && !user_mode(regs)) {
5401 unsigned long ip = instruction_pointer(regs);
5402 if (!search_exception_tables(ip))
5403 return false;
5404 }
5405 return !mmap_write_lock_killable(mm);
5406 }
5407
5408 /*
5409 * Helper for page fault handling.
5410 *
5411 * This is kind of equivalent to "mmap_read_lock()" followed
5412 * by "find_extend_vma()", except it's a lot more careful about
5413 * the locking (and will drop the lock on failure).
5414 *
5415 * For example, if we have a kernel bug that causes a page
5416 * fault, we don't want to just use mmap_read_lock() to get
5417 * the mm lock, because that would deadlock if the bug were
5418 * to happen while we're holding the mm lock for writing.
5419 *
5420 * So this checks the exception tables on kernel faults in
5421 * order to only do all this for instructions that are actually
5422 * expected to fault.
5423 *
5424 * We can also actually take the mm lock for writing if we
5425 * need to extend the vma, which helps the VM layer a lot.
5426 */
5427 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
5428 unsigned long addr, struct pt_regs *regs)
5429 {
5430 struct vm_area_struct *vma;
5431
5432 if (!get_mmap_lock_carefully(mm, regs))
5433 return NULL;
5434
5435 vma = find_vma(mm, addr);
5436 if (likely(vma && (vma->vm_start <= addr)))
5437 return vma;
5438
5439 /*
5440 * Well, dang. We might still be successful, but only
5441 * if we can extend a vma to do so.
5442 */
5443 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
5444 mmap_read_unlock(mm);
5445 return NULL;
5446 }
5447
5448 /*
5449 * We can try to upgrade the mmap lock atomically,
5450 * in which case we can continue to use the vma
5451 * we already looked up.
5452 * 5453 * Otherwise we'll have to drop the mmap lock and 5454 * re-take it, and also look up the vma again, 5455 * re-checking it. 5456 */ 5457 if (!mmap_upgrade_trylock(mm)) { 5458 if (!upgrade_mmap_lock_carefully(mm, regs)) 5459 return NULL; 5460 5461 vma = find_vma(mm, addr); 5462 if (!vma) 5463 goto fail; 5464 if (vma->vm_start <= addr) 5465 goto success; 5466 if (!(vma->vm_flags & VM_GROWSDOWN)) 5467 goto fail; 5468 } 5469 5470 if (expand_stack_locked(vma, addr)) 5471 goto fail; 5472 5473 success: 5474 mmap_write_downgrade(mm); 5475 return vma; 5476 5477 fail: 5478 mmap_write_unlock(mm); 5479 return NULL; 5480 } 5481 #endif 5482 5483 #ifdef CONFIG_PER_VMA_LOCK 5484 /* 5485 * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be 5486 * stable and not isolated. If the VMA is not found or is being modified the 5487 * function returns NULL. 5488 */ 5489 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, 5490 unsigned long address) 5491 { 5492 MA_STATE(mas, &mm->mm_mt, address, address); 5493 struct vm_area_struct *vma; 5494 5495 rcu_read_lock(); 5496 retry: 5497 vma = mas_walk(&mas); 5498 if (!vma) 5499 goto inval; 5500 5501 if (!vma_start_read(vma)) 5502 goto inval; 5503 5504 /* 5505 * find_mergeable_anon_vma uses adjacent vmas which are not locked. 5506 * This check must happen after vma_start_read(); otherwise, a 5507 * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA 5508 * from its anon_vma. 5509 */ 5510 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) 5511 goto inval_end_read; 5512 5513 /* Check since vm_start/vm_end might change before we lock the VMA */ 5514 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 5515 goto inval_end_read; 5516 5517 /* Check if the VMA got isolated after we found it */ 5518 if (vma->detached) { 5519 vma_end_read(vma); 5520 count_vm_vma_lock_event(VMA_LOCK_MISS); 5521 /* The area was replaced with another one */ 5522 goto retry; 5523 } 5524 5525 rcu_read_unlock(); 5526 return vma; 5527 5528 inval_end_read: 5529 vma_end_read(vma); 5530 inval: 5531 rcu_read_unlock(); 5532 count_vm_vma_lock_event(VMA_LOCK_ABORT); 5533 return NULL; 5534 } 5535 #endif /* CONFIG_PER_VMA_LOCK */ 5536 5537 #ifndef __PAGETABLE_P4D_FOLDED 5538 /* 5539 * Allocate p4d page table. 5540 * We've already handled the fast-path in-line. 5541 */ 5542 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 5543 { 5544 p4d_t *new = p4d_alloc_one(mm, address); 5545 if (!new) 5546 return -ENOMEM; 5547 5548 spin_lock(&mm->page_table_lock); 5549 if (pgd_present(*pgd)) { /* Another has populated it */ 5550 p4d_free(mm, new); 5551 } else { 5552 smp_wmb(); /* See comment in pmd_install() */ 5553 pgd_populate(mm, pgd, new); 5554 } 5555 spin_unlock(&mm->page_table_lock); 5556 return 0; 5557 } 5558 #endif /* __PAGETABLE_P4D_FOLDED */ 5559 5560 #ifndef __PAGETABLE_PUD_FOLDED 5561 /* 5562 * Allocate page upper directory. 5563 * We've already handled the fast-path in-line. 
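 * (The fast path is the inline pud_alloc() wrapper, which only calls
 * __pud_alloc() when the p4d entry is still empty.)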
5564 */ 5565 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) 5566 { 5567 pud_t *new = pud_alloc_one(mm, address); 5568 if (!new) 5569 return -ENOMEM; 5570 5571 spin_lock(&mm->page_table_lock); 5572 if (!p4d_present(*p4d)) { 5573 mm_inc_nr_puds(mm); 5574 smp_wmb(); /* See comment in pmd_install() */ 5575 p4d_populate(mm, p4d, new); 5576 } else /* Another has populated it */ 5577 pud_free(mm, new); 5578 spin_unlock(&mm->page_table_lock); 5579 return 0; 5580 } 5581 #endif /* __PAGETABLE_PUD_FOLDED */ 5582 5583 #ifndef __PAGETABLE_PMD_FOLDED 5584 /* 5585 * Allocate page middle directory. 5586 * We've already handled the fast-path in-line. 5587 */ 5588 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 5589 { 5590 spinlock_t *ptl; 5591 pmd_t *new = pmd_alloc_one(mm, address); 5592 if (!new) 5593 return -ENOMEM; 5594 5595 ptl = pud_lock(mm, pud); 5596 if (!pud_present(*pud)) { 5597 mm_inc_nr_pmds(mm); 5598 smp_wmb(); /* See comment in pmd_install() */ 5599 pud_populate(mm, pud, new); 5600 } else { /* Another has populated it */ 5601 pmd_free(mm, new); 5602 } 5603 spin_unlock(ptl); 5604 return 0; 5605 } 5606 #endif /* __PAGETABLE_PMD_FOLDED */ 5607 5608 /** 5609 * follow_pte - look up PTE at a user virtual address 5610 * @mm: the mm_struct of the target address space 5611 * @address: user virtual address 5612 * @ptepp: location to store found PTE 5613 * @ptlp: location to store the lock for the PTE 5614 * 5615 * On a successful return, the pointer to the PTE is stored in @ptepp; 5616 * the corresponding lock is taken and its location is stored in @ptlp. 5617 * The contents of the PTE are only stable until @ptlp is released; 5618 * any further use, if any, must be protected against invalidation 5619 * with MMU notifiers. 5620 * 5621 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore 5622 * should be taken for read. 5623 * 5624 * KVM uses this function. While it is arguably less bad than ``follow_pfn``, 5625 * it is not a good general-purpose API. 5626 * 5627 * Return: zero on success, -ve otherwise. 5628 */ 5629 int follow_pte(struct mm_struct *mm, unsigned long address, 5630 pte_t **ptepp, spinlock_t **ptlp) 5631 { 5632 pgd_t *pgd; 5633 p4d_t *p4d; 5634 pud_t *pud; 5635 pmd_t *pmd; 5636 pte_t *ptep; 5637 5638 pgd = pgd_offset(mm, address); 5639 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 5640 goto out; 5641 5642 p4d = p4d_offset(pgd, address); 5643 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) 5644 goto out; 5645 5646 pud = pud_offset(p4d, address); 5647 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 5648 goto out; 5649 5650 pmd = pmd_offset(pud, address); 5651 VM_BUG_ON(pmd_trans_huge(*pmd)); 5652 5653 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 5654 if (!ptep) 5655 goto out; 5656 if (!pte_present(ptep_get(ptep))) 5657 goto unlock; 5658 *ptepp = ptep; 5659 return 0; 5660 unlock: 5661 pte_unmap_unlock(ptep, *ptlp); 5662 out: 5663 return -EINVAL; 5664 } 5665 EXPORT_SYMBOL_GPL(follow_pte); 5666 5667 /** 5668 * follow_pfn - look up PFN at a user virtual address 5669 * @vma: memory mapping 5670 * @address: user virtual address 5671 * @pfn: location to store found PFN 5672 * 5673 * Only IO mappings and raw PFN mappings are allowed. 5674 * 5675 * This function does not allow the caller to read the permissions 5676 * of the PTE. Do not use it. 5677 * 5678 * Return: zero and the pfn at @pfn on success, -ve otherwise. 
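 *
 * An illustrative (and discouraged) call sequence, assuming the caller
 * already holds the mmap lock of @vma's mm for read:
 *
 *	unsigned long pfn;
 *
 *	if (!follow_pfn(vma, address, &pfn))
 *		pr_info("address %lx maps to pfn %lx\n", address, pfn);
 *
 * The pfn may be stale as soon as this helper drops the PTE lock, which
 * is one more reason to prefer follow_pte() and to inspect the PTE under
 * the lock it returns.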
5679 */ 5680 int follow_pfn(struct vm_area_struct *vma, unsigned long address, 5681 unsigned long *pfn) 5682 { 5683 int ret = -EINVAL; 5684 spinlock_t *ptl; 5685 pte_t *ptep; 5686 5687 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5688 return ret; 5689 5690 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); 5691 if (ret) 5692 return ret; 5693 *pfn = pte_pfn(ptep_get(ptep)); 5694 pte_unmap_unlock(ptep, ptl); 5695 return 0; 5696 } 5697 EXPORT_SYMBOL(follow_pfn); 5698 5699 #ifdef CONFIG_HAVE_IOREMAP_PROT 5700 int follow_phys(struct vm_area_struct *vma, 5701 unsigned long address, unsigned int flags, 5702 unsigned long *prot, resource_size_t *phys) 5703 { 5704 int ret = -EINVAL; 5705 pte_t *ptep, pte; 5706 spinlock_t *ptl; 5707 5708 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5709 goto out; 5710 5711 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) 5712 goto out; 5713 pte = ptep_get(ptep); 5714 5715 if ((flags & FOLL_WRITE) && !pte_write(pte)) 5716 goto unlock; 5717 5718 *prot = pgprot_val(pte_pgprot(pte)); 5719 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 5720 5721 ret = 0; 5722 unlock: 5723 pte_unmap_unlock(ptep, ptl); 5724 out: 5725 return ret; 5726 } 5727 5728 /** 5729 * generic_access_phys - generic implementation for iomem mmap access 5730 * @vma: the vma to access 5731 * @addr: userspace address, not relative offset within @vma 5732 * @buf: buffer to read/write 5733 * @len: length of transfer 5734 * @write: set to FOLL_WRITE when writing, otherwise reading 5735 * 5736 * This is a generic implementation for &vm_operations_struct.access for an 5737 * iomem mapping. This callback is used by access_process_vm() when the @vma is 5738 * not page based. 5739 */ 5740 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 5741 void *buf, int len, int write) 5742 { 5743 resource_size_t phys_addr; 5744 unsigned long prot = 0; 5745 void __iomem *maddr; 5746 pte_t *ptep, pte; 5747 spinlock_t *ptl; 5748 int offset = offset_in_page(addr); 5749 int ret = -EINVAL; 5750 5751 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5752 return -EINVAL; 5753 5754 retry: 5755 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) 5756 return -EINVAL; 5757 pte = ptep_get(ptep); 5758 pte_unmap_unlock(ptep, ptl); 5759 5760 prot = pgprot_val(pte_pgprot(pte)); 5761 phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 5762 5763 if ((write & FOLL_WRITE) && !pte_write(pte)) 5764 return -EINVAL; 5765 5766 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 5767 if (!maddr) 5768 return -ENOMEM; 5769 5770 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) 5771 goto out_unmap; 5772 5773 if (!pte_same(pte, ptep_get(ptep))) { 5774 pte_unmap_unlock(ptep, ptl); 5775 iounmap(maddr); 5776 5777 goto retry; 5778 } 5779 5780 if (write) 5781 memcpy_toio(maddr + offset, buf, len); 5782 else 5783 memcpy_fromio(buf, maddr + offset, len); 5784 ret = len; 5785 pte_unmap_unlock(ptep, ptl); 5786 out_unmap: 5787 iounmap(maddr); 5788 5789 return ret; 5790 } 5791 EXPORT_SYMBOL_GPL(generic_access_phys); 5792 #endif 5793 5794 /* 5795 * Access another process' address space as given in mm. 
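 *
 * The copy is done one page at a time: each iteration pins a page with
 * get_user_page_vma_remote() and copies through a temporary kernel
 * mapping, falling back to vma->vm_ops->access() for VM_IO / VM_PFNMAP
 * mappings that have no struct page behind them.
 *
 * Callers normally go through access_remote_vm() or access_process_vm()
 * below; an illustrative sketch of the latter:
 *
 *	char tmp[8];
 *	int n = access_process_vm(task, addr, tmp, sizeof(tmp), FOLL_FORCE);
 *
 * where n is the number of bytes actually copied, possibly fewer than
 * requested.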
5796 */ 5797 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, 5798 void *buf, int len, unsigned int gup_flags) 5799 { 5800 void *old_buf = buf; 5801 int write = gup_flags & FOLL_WRITE; 5802 5803 if (mmap_read_lock_killable(mm)) 5804 return 0; 5805 5806 /* Untag the address before looking up the VMA */ 5807 addr = untagged_addr_remote(mm, addr); 5808 5809 /* Avoid triggering the temporary warning in __get_user_pages */ 5810 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) 5811 return 0; 5812 5813 /* ignore errors, just check how much was successfully transferred */ 5814 while (len) { 5815 int bytes, offset; 5816 void *maddr; 5817 struct vm_area_struct *vma = NULL; 5818 struct page *page = get_user_page_vma_remote(mm, addr, 5819 gup_flags, &vma); 5820 5821 if (IS_ERR(page)) { 5822 /* We might need to expand the stack to access it */ 5823 vma = vma_lookup(mm, addr); 5824 if (!vma) { 5825 vma = expand_stack(mm, addr); 5826 5827 /* mmap_lock was dropped on failure */ 5828 if (!vma) 5829 return buf - old_buf; 5830 5831 /* Try again if stack expansion worked */ 5832 continue; 5833 } 5834 5835 /* 5836 * Check if this is a VM_IO | VM_PFNMAP VMA, which 5837 * we can access using slightly different code. 5838 */ 5839 bytes = 0; 5840 #ifdef CONFIG_HAVE_IOREMAP_PROT 5841 if (vma->vm_ops && vma->vm_ops->access) 5842 bytes = vma->vm_ops->access(vma, addr, buf, 5843 len, write); 5844 #endif 5845 if (bytes <= 0) 5846 break; 5847 } else { 5848 bytes = len; 5849 offset = addr & (PAGE_SIZE-1); 5850 if (bytes > PAGE_SIZE-offset) 5851 bytes = PAGE_SIZE-offset; 5852 5853 maddr = kmap(page); 5854 if (write) { 5855 copy_to_user_page(vma, page, addr, 5856 maddr + offset, buf, bytes); 5857 set_page_dirty_lock(page); 5858 } else { 5859 copy_from_user_page(vma, page, addr, 5860 buf, maddr + offset, bytes); 5861 } 5862 kunmap(page); 5863 put_page(page); 5864 } 5865 len -= bytes; 5866 buf += bytes; 5867 addr += bytes; 5868 } 5869 mmap_read_unlock(mm); 5870 5871 return buf - old_buf; 5872 } 5873 5874 /** 5875 * access_remote_vm - access another process' address space 5876 * @mm: the mm_struct of the target address space 5877 * @addr: start address to access 5878 * @buf: source or destination buffer 5879 * @len: number of bytes to transfer 5880 * @gup_flags: flags modifying lookup behaviour 5881 * 5882 * The caller must hold a reference on @mm. 5883 * 5884 * Return: number of bytes copied from source to destination. 5885 */ 5886 int access_remote_vm(struct mm_struct *mm, unsigned long addr, 5887 void *buf, int len, unsigned int gup_flags) 5888 { 5889 return __access_remote_vm(mm, addr, buf, len, gup_flags); 5890 } 5891 5892 /* 5893 * Access another process' address space. 5894 * Source/target buffer must be kernel space, 5895 * Do not walk the page table directly, use get_user_pages 5896 */ 5897 int access_process_vm(struct task_struct *tsk, unsigned long addr, 5898 void *buf, int len, unsigned int gup_flags) 5899 { 5900 struct mm_struct *mm; 5901 int ret; 5902 5903 mm = get_task_mm(tsk); 5904 if (!mm) 5905 return 0; 5906 5907 ret = __access_remote_vm(mm, addr, buf, len, gup_flags); 5908 5909 mmput(mm); 5910 5911 return ret; 5912 } 5913 EXPORT_SYMBOL_GPL(access_process_vm); 5914 5915 /* 5916 * Print the name of a VMA. 
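 *
 * The output looks roughly like "app[400000+5000]" (illustrative values):
 * the mapped file's basename followed by the VMA start address and length
 * in hex, with the given prefix prepended.  Because this may be called
 * from atomic context (e.g. signal or fault reporting paths), it only
 * trylocks the mmap lock below and silently gives up if that fails.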
5917 */ 5918 void print_vma_addr(char *prefix, unsigned long ip) 5919 { 5920 struct mm_struct *mm = current->mm; 5921 struct vm_area_struct *vma; 5922 5923 /* 5924 * we might be running from an atomic context so we cannot sleep 5925 */ 5926 if (!mmap_read_trylock(mm)) 5927 return; 5928 5929 vma = find_vma(mm, ip); 5930 if (vma && vma->vm_file) { 5931 struct file *f = vma->vm_file; 5932 char *buf = (char *)__get_free_page(GFP_NOWAIT); 5933 if (buf) { 5934 char *p; 5935 5936 p = file_path(f, buf, PAGE_SIZE); 5937 if (IS_ERR(p)) 5938 p = "?"; 5939 printk("%s%s[%lx+%lx]", prefix, kbasename(p), 5940 vma->vm_start, 5941 vma->vm_end - vma->vm_start); 5942 free_page((unsigned long)buf); 5943 } 5944 } 5945 mmap_read_unlock(mm); 5946 } 5947 5948 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) 5949 void __might_fault(const char *file, int line) 5950 { 5951 if (pagefault_disabled()) 5952 return; 5953 __might_sleep(file, line); 5954 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) 5955 if (current->mm) 5956 might_lock_read(&current->mm->mmap_lock); 5957 #endif 5958 } 5959 EXPORT_SYMBOL(__might_fault); 5960 #endif 5961 5962 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 5963 /* 5964 * Process all subpages of the specified huge page with the specified 5965 * operation. The target subpage will be processed last to keep its 5966 * cache lines hot. 5967 */ 5968 static inline int process_huge_page( 5969 unsigned long addr_hint, unsigned int pages_per_huge_page, 5970 int (*process_subpage)(unsigned long addr, int idx, void *arg), 5971 void *arg) 5972 { 5973 int i, n, base, l, ret; 5974 unsigned long addr = addr_hint & 5975 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); 5976 5977 /* Process target subpage last to keep its cache lines hot */ 5978 might_sleep(); 5979 n = (addr_hint - addr) / PAGE_SIZE; 5980 if (2 * n <= pages_per_huge_page) { 5981 /* If target subpage in first half of huge page */ 5982 base = 0; 5983 l = n; 5984 /* Process subpages at the end of huge page */ 5985 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) { 5986 cond_resched(); 5987 ret = process_subpage(addr + i * PAGE_SIZE, i, arg); 5988 if (ret) 5989 return ret; 5990 } 5991 } else { 5992 /* If target subpage in second half of huge page */ 5993 base = pages_per_huge_page - 2 * (pages_per_huge_page - n); 5994 l = pages_per_huge_page - n; 5995 /* Process subpages at the beginning of huge page */ 5996 for (i = 0; i < base; i++) { 5997 cond_resched(); 5998 ret = process_subpage(addr + i * PAGE_SIZE, i, arg); 5999 if (ret) 6000 return ret; 6001 } 6002 } 6003 /* 6004 * Process remaining subpages in left-right-left-right pattern 6005 * towards the target subpage 6006 */ 6007 for (i = 0; i < l; i++) { 6008 int left_idx = base + i; 6009 int right_idx = base + 2 * l - 1 - i; 6010 6011 cond_resched(); 6012 ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg); 6013 if (ret) 6014 return ret; 6015 cond_resched(); 6016 ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg); 6017 if (ret) 6018 return ret; 6019 } 6020 return 0; 6021 } 6022 6023 static void clear_gigantic_page(struct page *page, 6024 unsigned long addr, 6025 unsigned int pages_per_huge_page) 6026 { 6027 int i; 6028 struct page *p; 6029 6030 might_sleep(); 6031 for (i = 0; i < pages_per_huge_page; i++) { 6032 p = nth_page(page, i); 6033 cond_resched(); 6034 clear_user_highpage(p, addr + i * PAGE_SIZE); 6035 } 6036 } 6037 6038 static int clear_subpage(unsigned long addr, int idx, void *arg) 6039 { 6040 struct page *page =
arg; 6041 6042 clear_user_highpage(page + idx, addr); 6043 return 0; 6044 } 6045 6046 void clear_huge_page(struct page *page, 6047 unsigned long addr_hint, unsigned int pages_per_huge_page) 6048 { 6049 unsigned long addr = addr_hint & 6050 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); 6051 6052 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { 6053 clear_gigantic_page(page, addr, pages_per_huge_page); 6054 return; 6055 } 6056 6057 process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page); 6058 } 6059 6060 static int copy_user_gigantic_page(struct folio *dst, struct folio *src, 6061 unsigned long addr, 6062 struct vm_area_struct *vma, 6063 unsigned int pages_per_huge_page) 6064 { 6065 int i; 6066 struct page *dst_page; 6067 struct page *src_page; 6068 6069 for (i = 0; i < pages_per_huge_page; i++) { 6070 dst_page = folio_page(dst, i); 6071 src_page = folio_page(src, i); 6072 6073 cond_resched(); 6074 if (copy_mc_user_highpage(dst_page, src_page, 6075 addr + i*PAGE_SIZE, vma)) { 6076 memory_failure_queue(page_to_pfn(src_page), 0); 6077 return -EHWPOISON; 6078 } 6079 } 6080 return 0; 6081 } 6082 6083 struct copy_subpage_arg { 6084 struct page *dst; 6085 struct page *src; 6086 struct vm_area_struct *vma; 6087 }; 6088 6089 static int copy_subpage(unsigned long addr, int idx, void *arg) 6090 { 6091 struct copy_subpage_arg *copy_arg = arg; 6092 6093 if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx, 6094 addr, copy_arg->vma)) { 6095 memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0); 6096 return -EHWPOISON; 6097 } 6098 return 0; 6099 } 6100 6101 int copy_user_large_folio(struct folio *dst, struct folio *src, 6102 unsigned long addr_hint, struct vm_area_struct *vma) 6103 { 6104 unsigned int pages_per_huge_page = folio_nr_pages(dst); 6105 unsigned long addr = addr_hint & 6106 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); 6107 struct copy_subpage_arg arg = { 6108 .dst = &dst->page, 6109 .src = &src->page, 6110 .vma = vma, 6111 }; 6112 6113 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) 6114 return copy_user_gigantic_page(dst, src, addr, vma, 6115 pages_per_huge_page); 6116 6117 return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg); 6118 } 6119 6120 long copy_folio_from_user(struct folio *dst_folio, 6121 const void __user *usr_src, 6122 bool allow_pagefault) 6123 { 6124 void *kaddr; 6125 unsigned long i, rc = 0; 6126 unsigned int nr_pages = folio_nr_pages(dst_folio); 6127 unsigned long ret_val = nr_pages * PAGE_SIZE; 6128 struct page *subpage; 6129 6130 for (i = 0; i < nr_pages; i++) { 6131 subpage = folio_page(dst_folio, i); 6132 kaddr = kmap_local_page(subpage); 6133 if (!allow_pagefault) 6134 pagefault_disable(); 6135 rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE); 6136 if (!allow_pagefault) 6137 pagefault_enable(); 6138 kunmap_local(kaddr); 6139 6140 ret_val -= (PAGE_SIZE - rc); 6141 if (rc) 6142 break; 6143 6144 flush_dcache_page(subpage); 6145 6146 cond_resched(); 6147 } 6148 return ret_val; 6149 } 6150 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 6151 6152 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS 6153 6154 static struct kmem_cache *page_ptl_cachep; 6155 6156 void __init ptlock_cache_init(void) 6157 { 6158 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, 6159 SLAB_PANIC, NULL); 6160 } 6161 6162 bool ptlock_alloc(struct ptdesc *ptdesc) 6163 { 6164 spinlock_t *ptl; 6165 6166 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); 6167 
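	/* Slab allocation can fail under memory pressure; report that to the caller */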
if (!ptl) 6168 return false; 6169 ptdesc->ptl = ptl; 6170 return true; 6171 } 6172 6173 void ptlock_free(struct ptdesc *ptdesc) 6174 { 6175 kmem_cache_free(page_ptl_cachep, ptdesc->ptl); 6176 } 6177 #endif 6178
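
/*
 * Illustrative sketch only: how the per-page-table locks set up by
 * ptlock_alloc()/ptlock_free() above are normally consumed by a page
 * table walker.  pte_offset_map_lock() maps the PTE page and takes its
 * (possibly split) lock in one step, and pte_unmap_unlock() undoes both.
 * The function name and the -EAGAIN convention here are hypothetical,
 * chosen just for the example.
 */
static int __maybe_unused example_read_locked_pte(struct mm_struct *mm,
						  pmd_t *pmd,
						  unsigned long addr,
						  pte_t *ptentp)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		return -EAGAIN;		/* PTE page is gone or being freed */

	/* The PTE value is only stable while the split lock is held */
	*ptentp = ptep_get(ptep);

	pte_unmap_unlock(ptep, ptl);
	return 0;
}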