// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 *            Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 *            Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *            (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);
/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
        if (!userfaultfd_wp(vmf->vma))
                return false;
        if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
                return false;

        return pte_marker_uffd_wp(vmf->orig_pte);
}

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, i.e. the
 * end of ZONE_NORMAL.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
                                        1;
#else
                                        2;
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
        /*
         * Transitioning a PTE from 'old' to 'young' can be expensive on
         * some architectures, even if it's performed in hardware. By
         * default, "false" means prefaulted entries will be 'young'.
         */
        return false;
}
#endif

static int __init disable_randmaps(char *s)
{
        randomize_va_space = 0;
        return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
        zero_pfn = page_to_pfn(ZERO_PAGE(0));
        return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
        trace_rss_stat(mm, member);
}
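/*
 * Added note (see Documentation/admin-guide/sysctl/kernel.rst for the
 * authoritative description): randomize_va_space == 0 disables address
 * space randomization entirely (also reachable via the "norandmaps" boot
 * parameter above), == 1 randomizes stack/mmap/VDSO bases but not brk,
 * and == 2 (the default) additionally randomizes brk.
 */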
/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                           unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                free_pmd_range(tlb, pud, addr, next, floor, ceiling);
        } while (pud++, addr = next, addr != end);

        start &= P4D_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= P4D_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(p4d, start);
        p4d_clear(p4d);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        p4d_t *p4d;
        unsigned long next;
        unsigned long start;

        start = addr;
        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                free_pud_range(tlb, p4d, addr, next, floor, ceiling);
        } while (p4d++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        p4d = p4d_offset(pgd, start);
        pgd_clear(pgd);
        p4d_free_tlb(tlb, p4d, start);
}
/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * The next few lines have given us lots of grief...
         *
         * Why are we testing PMD* at this top level?  Because often
         * there will be no work to do at all, and we'd prefer not to
         * go all the way down to the bottom just to discover that.
         *
         * Why all these "- 1"s?  Because 0 represents both the bottom
         * of the address space and the top of it (using -1 for the
         * top wouldn't help much: the masks would do the wrong thing).
         * The rule is that addr 0 and floor 0 refer to the bottom of
         * the address space, but end 0 and ceiling 0 refer to the top.
         * Comparisons need to use "end - 1" and "ceiling - 1" (though
         * that end 0 case should be mythical).
         *
         * Wherever addr is brought up or ceiling brought down, we must
         * be careful to reject "the opposite 0" before it confuses the
         * subsequent tests.  But what about where end is brought down
         * by PMD_SIZE below?  no, end can't go down to 0 there.
         *
         * Whereas we round start (addr) and ceiling down, by different
         * masks at different levels, in order to test whether a table
         * now has no other vmas using it, so can be freed, we don't
         * bother to round floor or end up - the tests don't need that.
         */

        addr &= PMD_MASK;
        if (addr < floor) {
                addr += PMD_SIZE;
                if (!addr)
                        return;
        }
        if (ceiling) {
                ceiling &= PMD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                end -= PMD_SIZE;
        if (addr > end - 1)
                return;
        /*
         * We add page table cache pages with PAGE_SIZE
         * (see pte_free_tlb()), so flush the tlb if we need to.
         */
        tlb_change_page_size(tlb, PAGE_SIZE);
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                   struct vm_area_struct *vma, unsigned long floor,
                   unsigned long ceiling, bool mm_wr_locked)
{
        do {
                unsigned long addr = vma->vm_start;
                struct vm_area_struct *next;

                /*
                 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
                 * be 0.  This will underflow and is okay.
                 */
                next = mas_find(mas, ceiling - 1);
                if (unlikely(xa_is_zero(next)))
                        next = NULL;

                /*
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
                if (mm_wr_locked)
                        vma_start_write(vma);
                unlink_anon_vmas(vma);
                unlink_file_vma(vma);

                if (is_vm_hugetlb_page(vma)) {
                        hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next ? next->vm_start : ceiling);
                } else {
                        /*
                         * Optimization: gather nearby vmas into one call down
                         */
                        while (next && next->vm_start <= vma->vm_end + PMD_SIZE
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = mas_find(mas, ceiling - 1);
                                if (unlikely(xa_is_zero(next)))
                                        next = NULL;
                                if (mm_wr_locked)
                                        vma_start_write(vma);
                                unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next ? next->vm_start : ceiling);
                }
                vma = next;
        } while (vma);
}
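/*
 * Worked example of the floor/ceiling rules above (values invented for
 * illustration, assuming a 2MB PMD_SIZE): freeing [0x200000, 0x400000)
 * with floor == 0x1ff000 and ceiling == 0x401000. addr &= PMD_MASK leaves
 * 0x200000, which is not below floor, so nothing is skipped at the low
 * end. ceiling &= PMD_MASK gives 0x400000; "end - 1 > ceiling - 1" is
 * false (0x3fffff vs 0x3fffff), so end stays put. The "- 1" trick is what
 * keeps ceiling == 0 meaning "top of address space": 0 - 1 wraps to the
 * maximum value, so every end compares below it.
 */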
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);

        if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                mm_inc_nr_ptes(mm);
                /*
                 * Ensure all pte setup (eg. pte page lock and page clearing) are
                 * visible before the pte is made visible to other CPUs by being
                 * put into page tables.
                 *
                 * The other side of the story is the pointer chasing in the page
                 * table walking code (when walking the page table without locking;
                 * ie. most of the time). Fortunately, these data accesses consist
                 * of a chain of data-dependent loads, meaning most CPUs (alpha
                 * being the notable exception) will already guarantee loads are
                 * seen in-order. See the alpha page table accessors for the
                 * smp_rmb() barriers in page table walking code.
                 */
                smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
                pmd_populate(mm, pmd, *pte);
                *pte = NULL;
        }
        spin_unlock(ptl);
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
        pgtable_t new = pte_alloc_one(mm);
        if (!new)
                return -ENOMEM;

        pmd_install(mm, pmd, &new);
        if (new)
                pte_free(mm, new);
        return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
        pte_t *new = pte_alloc_one_kernel(&init_mm);
        if (!new)
                return -ENOMEM;

        spin_lock(&init_mm.page_table_lock);
        if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                smp_wmb(); /* See comment in pmd_install() */
                pmd_populate_kernel(&init_mm, pmd, new);
                new = NULL;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (new)
                pte_free_kernel(&init_mm, new);
        return 0;
}

static inline void init_rss_vec(int *rss)
{
        memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
        int i;

        for (i = 0; i < NR_MM_COUNTERS; i++)
                if (rss[i])
                        add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                          pte_t pte, struct page *page)
{
        pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);
        struct address_space *mapping;
        pgoff_t index;
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        return;
                }
                if (nr_unshown) {
                        pr_alert("BUG: Bad page map: %lu messages suppressed\n",
                                 nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
        index = linear_page_index(vma, addr);

        pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
                 current->comm,
                 (long long)pte_val(pte), (long long)pmd_val(*pmd));
        if (page)
                dump_page(page, "bad pte");
        pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
                 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
        pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
                 vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->fault : NULL,
                 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
                 mapping ? mapping->a_ops->read_folio : NULL);
        dump_stack();
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
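/*
 * Sketch of the publication protocol pmd_install() relies on (added for
 * illustration; see the comment in pmd_install() itself):
 *
 *      CPU 0 (writer)                  CPU 1 (lockless walker)
 *      initialize pte page             pmd = READ_ONCE(*pmdp)
 *      smp_wmb()                       pte = pte_offset_*(pmd, addr)
 *      pmd_populate(mm, pmd, page)     use *pte  (data-dependent load)
 *
 * The reader's second access depends on the value read first, so all
 * architectures except Alpha order the two loads automatically; Alpha
 * supplies the matching smp_rmb() in its page table accessors.
 */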
/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                            pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
                if (vma->vm_ops && vma->vm_ops->find_special_page)
                        return vma->vm_ops->find_special_page(vma, addr);
                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                        return NULL;
                if (is_zero_pfn(pfn))
                        return NULL;
                if (pte_devmap(pte))
                        /*
                         * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
                         * and will have refcounts incremented on their struct pages
                         * when they are inserted into PTEs, thus they are safe to
                         * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
                         * do not have refcounts. Example of legacy ZONE_DEVICE is
                         * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
                         */
                        return NULL;

                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }

        /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
                                return NULL;
                        goto out;
                } else {
                        unsigned long off;
                        off = (addr - vma->vm_start) >> PAGE_SHIFT;
                        if (pfn == vma->vm_pgoff + off)
                                return NULL;
                        if (!is_cow_mapping(vma->vm_flags))
                                return NULL;
                }
        }

        if (is_zero_pfn(pfn))
                return NULL;

check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }

        /*
         * NOTE! We still have PageReserved() pages in the page tables.
         * eg. VDSO mappings can cause them to exist.
         */
out:
        return pfn_to_page(pfn);
}
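/*
 * Worked example of the linearity rule above (numbers invented for
 * illustration): suppose remap_pfn_range() mapped pfn 0x1000 at vm_start
 * and set vma->vm_pgoff == 0x1000. The pte at vm_start + 2 * PAGE_SIZE
 * then holds pfn 0x1002 == vma->vm_pgoff + 2, matching the rule, so it is
 * special and vm_normal_page() returns NULL. Had that pte been COWed, it
 * would point at a freshly allocated anon page whose pfn (almost
 * certainly) breaks the equality, so it is treated as a normal page.
 */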
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
                              pte_t pte)
{
        struct page *page = vm_normal_page(vma, addr, pte);

        if (page)
                return page_folio(page);
        return NULL;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                                pmd_t pmd)
{
        unsigned long pfn = pmd_pfn(pmd);

        /*
         * There is no pmd_special() but there may be special pmds, e.g.
         * in a direct-access (dax) mapping, so let's just replicate the
         * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
         */
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
                                return NULL;
                        goto out;
                } else {
                        unsigned long off;
                        off = (addr - vma->vm_start) >> PAGE_SHIFT;
                        if (pfn == vma->vm_pgoff + off)
                                return NULL;
                        if (!is_cow_mapping(vma->vm_flags))
                                return NULL;
                }
        }

        if (pmd_devmap(pmd))
                return NULL;
        if (is_huge_zero_pmd(pmd))
                return NULL;
        if (unlikely(pfn > highest_memmap_pfn))
                return NULL;

        /*
         * NOTE! We still have PageReserved() pages in the page tables.
         * eg. VDSO mappings can cause them to exist.
         */
out:
        return pfn_to_page(pfn);
}

struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
                                  unsigned long addr, pmd_t pmd)
{
        struct page *page = vm_normal_page_pmd(vma, addr, pmd);

        if (page)
                return page_folio(page);
        return NULL;
}
#endif
static void restore_exclusive_pte(struct vm_area_struct *vma,
                                  struct page *page, unsigned long address,
                                  pte_t *ptep)
{
        struct folio *folio = page_folio(page);
        pte_t orig_pte;
        pte_t pte;
        swp_entry_t entry;

        orig_pte = ptep_get(ptep);
        pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
        if (pte_swp_soft_dirty(orig_pte))
                pte = pte_mksoft_dirty(pte);

        entry = pte_to_swp_entry(orig_pte);
        if (pte_swp_uffd_wp(orig_pte))
                pte = pte_mkuffd_wp(pte);
        else if (is_writable_device_exclusive_entry(entry))
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);

        VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
                                           PageAnonExclusive(page)), folio);

        /*
         * No need to take a page reference as one was already
         * created when the swap entry was made.
         */
        if (folio_test_anon(folio))
                folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
        else
                /*
                 * Currently device exclusive access only supports anonymous
                 * memory, so the entry shouldn't point to a file-backed page.
                 */
                WARN_ON_ONCE(1);

        set_pte_at(vma->vm_mm, address, ptep, pte);

        /*
         * No need to invalidate - it was non-present before. However
         * secondary CPUs may have mappings that need invalidating.
         */
        update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
                          unsigned long addr)
{
        swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
        struct page *page = pfn_swap_entry_to_page(entry);

        if (trylock_page(page)) {
                restore_exclusive_pte(vma, page, addr, src_pte);
                unlock_page(page);
                return 0;
        }

        return -EBUSY;
}

/*
 * Copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task have been cleared in the whole range
 * covered by this vma.
 */
static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
                struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
        unsigned long vm_flags = dst_vma->vm_flags;
        pte_t orig_pte = ptep_get(src_pte);
        pte_t pte = orig_pte;
        struct folio *folio;
        struct page *page;
        swp_entry_t entry = pte_to_swp_entry(orig_pte);

        if (likely(!non_swap_entry(entry))) {
                if (swap_duplicate(entry) < 0)
                        return -EIO;

                /* make sure dst_mm is on swapoff's mmlist. */
                if (unlikely(list_empty(&dst_mm->mmlist))) {
                        spin_lock(&mmlist_lock);
                        if (list_empty(&dst_mm->mmlist))
                                list_add(&dst_mm->mmlist,
                                         &src_mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                /* Mark the swap entry as shared. */
                if (pte_swp_exclusive(orig_pte)) {
                        pte = pte_swp_clear_exclusive(orig_pte);
                        set_pte_at(src_mm, addr, src_pte, pte);
                }
                rss[MM_SWAPENTS]++;
        } else if (is_migration_entry(entry)) {
                folio = pfn_swap_entry_folio(entry);

                rss[mm_counter(folio)]++;

                if (!is_readable_migration_entry(entry) &&
                    is_cow_mapping(vm_flags)) {
                        /*
                         * COW mappings require pages in both parent and child
                         * to be set to read. A previously exclusive entry is
                         * now shared.
                         */
                        entry = make_readable_migration_entry(
                                                        swp_offset(entry));
                        pte = swp_entry_to_pte(entry);
                        if (pte_swp_soft_dirty(orig_pte))
                                pte = pte_swp_mksoft_dirty(pte);
                        if (pte_swp_uffd_wp(orig_pte))
                                pte = pte_swp_mkuffd_wp(pte);
                        set_pte_at(src_mm, addr, src_pte, pte);
                }
        } else if (is_device_private_entry(entry)) {
                page = pfn_swap_entry_to_page(entry);
                folio = page_folio(page);

                /*
                 * Update rss count even for unaddressable pages, as
                 * they should be treated just like normal pages in this
                 * respect.
                 *
                 * We will likely want to have some new rss counters
                 * for unaddressable pages, at some point. But for now
                 * keep things as they are.
                 */
                folio_get(folio);
                rss[mm_counter(folio)]++;
                /* Cannot fail as these pages cannot get pinned. */
                folio_try_dup_anon_rmap_pte(folio, page, src_vma);

                /*
                 * We do not preserve soft-dirty information, because so
                 * far, checkpoint/restore is the only feature that
                 * requires that. And checkpoint/restore does not work
                 * when a device driver is involved (you cannot easily
                 * save and restore device driver state).
                 */
                if (is_writable_device_private_entry(entry) &&
                    is_cow_mapping(vm_flags)) {
                        entry = make_readable_device_private_entry(
                                                        swp_offset(entry));
                        pte = swp_entry_to_pte(entry);
                        if (pte_swp_uffd_wp(orig_pte))
                                pte = pte_swp_mkuffd_wp(pte);
                        set_pte_at(src_mm, addr, src_pte, pte);
                }
        } else if (is_device_exclusive_entry(entry)) {
                /*
                 * Make device exclusive entries present by restoring the
                 * original entry then copying as for a present pte. Device
                 * exclusive entries currently only support private writable
                 * (ie. COW) mappings.
                 */
                VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
                if (try_restore_exclusive_pte(src_pte, src_vma, addr))
                        return -EBUSY;
                return -ENOENT;
        } else if (is_pte_marker_entry(entry)) {
                pte_marker marker = copy_pte_marker(entry, dst_vma);

                if (marker)
                        set_pte_at(dst_mm, addr, dst_pte,
                                   make_pte_marker(marker));
                return 0;
        }
        if (!userfaultfd_wp(dst_vma))
                pte = pte_swp_clear_uffd_wp(pte);
        set_pte_at(dst_mm, addr, dst_pte, pte);
        return 0;
}
/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
                  struct folio **prealloc, struct page *page)
{
        struct folio *new_folio;
        pte_t pte;

        new_folio = *prealloc;
        if (!new_folio)
                return -EAGAIN;

        /*
         * We have a prealloc page, all good!  Take it
         * over and copy the page & arm it.
         */
        *prealloc = NULL;
        copy_user_highpage(&new_folio->page, page, addr, src_vma);
        __folio_mark_uptodate(new_folio);
        folio_add_new_anon_rmap(new_folio, dst_vma, addr);
        folio_add_lru_vma(new_folio, dst_vma);
        rss[MM_ANONPAGES]++;

        /* All done, just insert the new page copy in the child */
        pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
        pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
        if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
                /* Uffd-wp needs to be delivered to dest pte as well */
                pte = pte_mkuffd_wp(pte);
        set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
        return 0;
}

static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
                struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
                pte_t pte, unsigned long addr, int nr)
{
        struct mm_struct *src_mm = src_vma->vm_mm;

        /* If it's a COW mapping, write protect it in both processes. */
        if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
                wrprotect_ptes(src_mm, addr, src_pte, nr);
                pte = pte_wrprotect(pte);
        }

        /* If it's a shared mapping, mark it clean in the child. */
        if (src_vma->vm_flags & VM_SHARED)
                pte = pte_mkclean(pte);
        pte = pte_mkold(pte);

        if (!userfaultfd_wp(dst_vma))
                pte = pte_clear_uffd_wp(pte);

        set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
}
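/*
 * Added illustration: for a MAP_PRIVATE writable mapping,
 * __copy_present_ptes() above leaves parent and child sharing each page
 * read-only after fork(). Neither process copies anything up front; the
 * first write in either one faults into the copy-on-write path
 * (do_wp_page()), which is what keeps fork() cheap for memory that is
 * rarely written afterwards.
 */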
/*
 * Copy one present PTE, trying to batch-process subsequent PTEs that map
 * consecutive pages of the same folio by copying them as well.
 *
 * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
 * Otherwise, returns the number of copied PTEs (at least 1).
 */
static inline int
copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                  pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
                  int max_nr, int *rss, struct folio **prealloc)
{
        struct page *page;
        struct folio *folio;
        bool any_writable;
        fpb_t flags = 0;
        int err, nr;

        page = vm_normal_page(src_vma, addr, pte);
        if (unlikely(!page))
                goto copy_pte;

        folio = page_folio(page);

        /*
         * If we likely have to copy, just don't bother with batching. Make
         * sure that the common "small folio" case is as fast as possible
         * by keeping the batching logic separate.
         */
        if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
                if (src_vma->vm_flags & VM_SHARED)
                        flags |= FPB_IGNORE_DIRTY;
                if (!vma_soft_dirty_enabled(src_vma))
                        flags |= FPB_IGNORE_SOFT_DIRTY;

                nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
                                     &any_writable, NULL, NULL);
                folio_ref_add(folio, nr);
                if (folio_test_anon(folio)) {
                        if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
                                                                  nr, src_vma))) {
                                folio_ref_sub(folio, nr);
                                return -EAGAIN;
                        }
                        rss[MM_ANONPAGES] += nr;
                        VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
                } else {
                        folio_dup_file_rmap_ptes(folio, page, nr);
                        rss[mm_counter_file(folio)] += nr;
                }
                if (any_writable)
                        pte = pte_mkwrite(pte, src_vma);
                __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
                                    addr, nr);
                return nr;
        }

        folio_get(folio);
        if (folio_test_anon(folio)) {
                /*
                 * If this page may have been pinned by the parent process,
                 * copy the page immediately for the child so that we'll always
                 * guarantee the pinned page won't be randomly replaced in the
                 * future.
                 */
                if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
                        /* Page may be pinned, we have to copy. */
                        folio_put(folio);
                        err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
                                                addr, rss, prealloc, page);
                        return err ? err : 1;
                }
                rss[MM_ANONPAGES]++;
                VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
        } else {
                folio_dup_file_rmap_pte(folio, page);
                rss[mm_counter_file(folio)]++;
        }

copy_pte:
        __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
        return 1;
}
static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
                struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
        struct folio *new_folio;

        if (need_zero)
                new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
        else
                new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
                                            addr, false);

        if (!new_folio)
                return NULL;

        if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
                folio_put(new_folio);
                return NULL;
        }
        folio_throttle_swaprate(new_folio, GFP_KERNEL);

        return new_folio;
}

static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
               pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
               unsigned long end)
{
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct mm_struct *src_mm = src_vma->vm_mm;
        pte_t *orig_src_pte, *orig_dst_pte;
        pte_t *src_pte, *dst_pte;
        pte_t ptent;
        spinlock_t *src_ptl, *dst_ptl;
        int progress, max_nr, ret = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};
        struct folio *prealloc = NULL;
        int nr;

again:
        progress = 0;
        init_rss_vec(rss);

        /*
         * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
         * error handling here, assume that exclusive mmap_lock on dst and src
         * protects anon from unexpected THP transitions; with shmem and file
         * protected by mmap_lock-less collapse skipping areas with anon_vma
         * (whereas vma_needs_copy() skips areas without anon_vma). A rework
         * can remove such assumptions later, but this is good enough for now.
         */
        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte) {
                ret = -ENOMEM;
                goto out;
        }
        src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);
        if (!src_pte) {
                pte_unmap_unlock(dst_pte, dst_ptl);
                /* ret == 0 */
                goto out;
        }
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
        orig_src_pte = src_pte;
        orig_dst_pte = dst_pte;
        arch_enter_lazy_mmu_mode();

        do {
                nr = 1;

                /*
                 * We are holding two locks at this point - either of them
                 * could generate latencies in another task on another CPU.
                 */
                if (progress >= 32) {
                        progress = 0;
                        if (need_resched() ||
                            spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
                                break;
                }
                ptent = ptep_get(src_pte);
                if (pte_none(ptent)) {
                        progress++;
                        continue;
                }
                if (unlikely(!pte_present(ptent))) {
                        ret = copy_nonpresent_pte(dst_mm, src_mm,
                                                  dst_pte, src_pte,
                                                  dst_vma, src_vma,
                                                  addr, rss);
                        if (ret == -EIO) {
                                entry = pte_to_swp_entry(ptep_get(src_pte));
                                break;
                        } else if (ret == -EBUSY) {
                                break;
                        } else if (!ret) {
                                progress += 8;
                                continue;
                        }
                        ptent = ptep_get(src_pte);
                        VM_WARN_ON_ONCE(!pte_present(ptent));

                        /*
                         * Device exclusive entry restored, continue by copying
                         * the now present pte.
                         */
                        WARN_ON_ONCE(ret != -ENOENT);
                }
                /* copy_present_ptes() will clear `*prealloc' if consumed */
                max_nr = (end - addr) / PAGE_SIZE;
                ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
                                        ptent, addr, max_nr, rss, &prealloc);
                /*
                 * If we need a pre-allocated page for this pte, drop the
                 * locks, allocate, and try again.
                 */
                if (unlikely(ret == -EAGAIN))
                        break;
                if (unlikely(prealloc)) {
                        /*
                         * The pre-allocated page cannot be reused next time
                         * round, so as to strictly follow the mempolicy
                         * (e.g., alloc_page_vma() allocates according to the
                         * address). This can only happen if one pinned pte
                         * changed.
                         */
                        folio_put(prealloc);
                        prealloc = NULL;
                }
                nr = ret;
                progress += 8 * nr;
        } while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
                 addr != end);

        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(orig_src_pte, src_ptl);
        add_mm_rss_vec(dst_mm, rss);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();

        if (ret == -EIO) {
                VM_WARN_ON_ONCE(!entry.val);
                if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
                        ret = -ENOMEM;
                        goto out;
                }
                entry.val = 0;
        } else if (ret == -EBUSY) {
                goto out;
        } else if (ret == -EAGAIN) {
                prealloc = folio_prealloc(src_mm, src_vma, addr, false);
                if (!prealloc)
                        return -ENOMEM;
        } else if (ret < 0) {
                VM_WARN_ON_ONCE(1);
        }

        /* We've captured and resolved the error. Reset, try again. */
        ret = 0;

        if (addr != end)
                goto again;
out:
        if (unlikely(prealloc))
                folio_put(prealloc);
        return ret;
}
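/*
 * Added summary of copy_pte_range()'s retry protocol: the loop breaks out
 * with -EIO when a swap count continuation is needed, with -EBUSY when a
 * device exclusive entry could not be restored without sleeping, and with
 * -EAGAIN when copy_present_ptes() wants a preallocated folio. After both
 * page table locks are dropped, -EIO is resolved via
 * add_swap_count_continuation() and -EAGAIN via folio_prealloc(), then
 * ret is reset to 0 and control jumps back to "again"; -EBUSY alone is
 * returned to the caller.
 */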
static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
               pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
               unsigned long end)
{
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct mm_struct *src_mm = src_vma->vm_mm;
        pmd_t *src_pmd, *dst_pmd;
        unsigned long next;

        dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
        if (!dst_pmd)
                return -ENOMEM;
        src_pmd = pmd_offset(src_pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
                    || pmd_devmap(*src_pmd)) {
                        int err;
                        VM_BUG_ON_VMA(next - addr != HPAGE_PMD_SIZE, src_vma);
                        err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
                                            addr, dst_vma, src_vma);
                        if (err == -ENOMEM)
                                return -ENOMEM;
                        if (!err)
                                continue;
                        /* fall through */
                }
                if (pmd_none_or_clear_bad(src_pmd))
                        continue;
                if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
                                   addr, next))
                        return -ENOMEM;
        } while (dst_pmd++, src_pmd++, addr = next, addr != end);
        return 0;
}
static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
               p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
               unsigned long end)
{
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct mm_struct *src_mm = src_vma->vm_mm;
        pud_t *src_pud, *dst_pud;
        unsigned long next;

        dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
        if (!dst_pud)
                return -ENOMEM;
        src_pud = pud_offset(src_p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
                        int err;

                        VM_BUG_ON_VMA(next - addr != HPAGE_PUD_SIZE, src_vma);
                        err = copy_huge_pud(dst_mm, src_mm,
                                            dst_pud, src_pud, addr, src_vma);
                        if (err == -ENOMEM)
                                return -ENOMEM;
                        if (!err)
                                continue;
                        /* fall through */
                }
                if (pud_none_or_clear_bad(src_pud))
                        continue;
                if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
                                   addr, next))
                        return -ENOMEM;
        } while (dst_pud++, src_pud++, addr = next, addr != end);
        return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
               pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
               unsigned long end)
{
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        p4d_t *src_p4d, *dst_p4d;
        unsigned long next;

        dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
        if (!dst_p4d)
                return -ENOMEM;
        src_p4d = p4d_offset(src_pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(src_p4d))
                        continue;
                if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
                                   addr, next))
                        return -ENOMEM;
        } while (dst_p4d++, src_p4d++, addr = next, addr != end);
        return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork(). Return
 * false when we can speed up fork() by allowing lazy page faults later until
 * when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
        /*
         * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
         * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
         * contains uffd-wp protection information that we can't retrieve
         * from the page cache, so skipping the copy would lose it.
         */
        if (userfaultfd_wp(dst_vma))
                return true;

        if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                return true;

        if (src_vma->anon_vma)
                return true;

        /*
         * Don't copy ptes where a page fault will fill them correctly. Fork
         * becomes much lighter when there are big shared or private readonly
         * mappings. The tradeoff is that copy_page_range is more efficient
         * than faulting.
         */
        return false;
}
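/*
 * Example of the effect (added note): a big read-only file mapping that
 * was never written (hence no anon_vma) is not copied at fork() time at
 * all; the child's page tables fill in lazily via minor faults. A
 * VM_PFNMAP or VM_MIXEDMAP mapping is always copied, since a later page
 * fault cannot in general recreate the driver-established entries.
 */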
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
        pgd_t *src_pgd, *dst_pgd;
        unsigned long next;
        unsigned long addr = src_vma->vm_start;
        unsigned long end = src_vma->vm_end;
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct mm_struct *src_mm = src_vma->vm_mm;
        struct mmu_notifier_range range;
        bool is_cow;
        int ret;

        if (!vma_needs_copy(dst_vma, src_vma))
                return 0;

        if (is_vm_hugetlb_page(src_vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

        if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
                /*
                 * We do not free on error cases below as remove_vma
                 * gets called on error from higher level routine
                 */
                ret = track_pfn_copy(src_vma);
                if (ret)
                        return ret;
        }

        /*
         * We need to invalidate the secondary MMU mappings only when
         * there could be a permission downgrade on the ptes of the
         * parent mm. And a permission downgrade will only happen if
         * is_cow_mapping() returns true.
         */
        is_cow = is_cow_mapping(src_vma->vm_flags);

        if (is_cow) {
                mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                        0, src_mm, addr, end);
                mmu_notifier_invalidate_range_start(&range);
                /*
                 * Disabling preemption is not needed for the write side, as
                 * the read side doesn't spin, but goes to the mmap_lock.
                 *
                 * Use the raw variant of the seqcount_t write API to avoid
                 * lockdep complaining about preemptibility.
                 */
                vma_assert_write_locked(src_vma);
                raw_write_seqcount_begin(&src_mm->write_protect_seq);
        }

        ret = 0;
        dst_pgd = pgd_offset(dst_mm, addr);
        src_pgd = pgd_offset(src_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(src_pgd))
                        continue;
                if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
                                            addr, next))) {
                        untrack_pfn_clear(dst_vma);
                        ret = -ENOMEM;
                        break;
                }
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);

        if (is_cow) {
                raw_write_seqcount_end(&src_mm->write_protect_seq);
                mmu_notifier_invalidate_range_end(&range);
        }
        return ret;
}

/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
        /* By default, zap all pages */
        if (!details)
                return true;

        /* Or, we zap COWed pages only if the caller wants to */
        return details->even_cows;
}

/* Decides whether we should zap this folio with the folio pointer specified */
static inline bool should_zap_folio(struct zap_details *details,
                                    struct folio *folio)
{
        /* If we can make a decision without *folio.. */
        if (should_zap_cows(details))
                return true;

        /* Otherwise we should only zap non-anon folios */
        return !folio_test_anon(folio);
}

static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
{
        if (!details)
                return false;

        return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}
/*
 * This function makes sure that we'll replace the none pte with an uffd-wp
 * swap special pte marker when necessary. Must be called with the pgtable
 * lock held.
 */
static inline void
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *pte, int nr,
                              struct zap_details *details, pte_t pteval)
{
        /* Zap on anonymous always means dropping everything */
        if (vma_is_anonymous(vma))
                return;

        if (zap_drop_file_uffd_wp(details))
                return;

        for (;;) {
                /* the PFN in the PTE is irrelevant. */
                pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
                if (--nr == 0)
                        break;
                pte++;
                addr += PAGE_SIZE;
        }
}

static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
                struct vm_area_struct *vma, struct folio *folio,
                struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
                unsigned long addr, struct zap_details *details, int *rss,
                bool *force_flush, bool *force_break)
{
        struct mm_struct *mm = tlb->mm;
        bool delay_rmap = false;

        if (!folio_test_anon(folio)) {
                ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
                if (pte_dirty(ptent)) {
                        folio_mark_dirty(folio);
                        if (tlb_delay_rmap(tlb)) {
                                delay_rmap = true;
                                *force_flush = true;
                        }
                }
                if (pte_young(ptent) && likely(vma_has_recency(vma)))
                        folio_mark_accessed(folio);
                rss[mm_counter(folio)] -= nr;
        } else {
                /* We don't need up-to-date accessed/dirty bits. */
                clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
                rss[MM_ANONPAGES] -= nr;
        }
        /* Checking a single PTE in a batch is sufficient. */
        arch_check_zapped_pte(vma, ptent);
        tlb_remove_tlb_entries(tlb, pte, nr, addr);
        if (unlikely(userfaultfd_pte_wp(vma, ptent)))
                zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details,
                                              ptent);

        if (!delay_rmap) {
                folio_remove_rmap_ptes(folio, page, nr, vma);

                if (unlikely(folio_mapcount(folio) < 0))
                        print_bad_pte(vma, addr, ptent, page);
        }
        if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
                *force_flush = true;
                *force_break = true;
        }
}

/*
 * Zap or skip at least one present PTE, trying to batch-process subsequent
 * PTEs that map consecutive pages of the same folio.
 *
 * Returns the number of processed (skipped or zapped) PTEs (at least 1).
 */
static inline int zap_present_ptes(struct mmu_gather *tlb,
                struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
                unsigned int max_nr, unsigned long addr,
                struct zap_details *details, int *rss, bool *force_flush,
                bool *force_break)
{
        const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
        struct mm_struct *mm = tlb->mm;
        struct folio *folio;
        struct page *page;
        int nr;

        page = vm_normal_page(vma, addr, ptent);
        if (!page) {
                /* We don't need up-to-date accessed/dirty bits. */
                ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
                arch_check_zapped_pte(vma, ptent);
                tlb_remove_tlb_entry(tlb, pte, addr);
                if (userfaultfd_pte_wp(vma, ptent))
                        zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
                                                      details, ptent);
                ksm_might_unmap_zero_page(mm, ptent);
                return 1;
        }

        folio = page_folio(page);
        if (unlikely(!should_zap_folio(details, folio)))
                return 1;

        /*
         * Make sure that the common "small folio" case is as fast as possible
         * by keeping the batching logic separate.
         */
        if (unlikely(folio_test_large(folio) && max_nr != 1)) {
                nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
                                     NULL, NULL, NULL);

                zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
                                       addr, details, rss, force_flush,
                                       force_break);
                return nr;
        }
        zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
                               details, rss, force_flush, force_break);
        return 1;
}
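/*
 * Added note on the out-parameters above: *force_flush asks
 * zap_pte_range() to flush the TLB before the page table lock is dropped
 * (needed when rmap removal was delayed for a dirty shared folio, or when
 * the mmu_gather batch filled up); *force_break additionally ends the
 * current pte loop so the batched pages can be freed before the scan
 * resumes.
 */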
static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
{
        bool force_flush = false, force_break = false;
        struct mm_struct *mm = tlb->mm;
        int rss[NR_MM_COUNTERS];
        spinlock_t *ptl;
        pte_t *start_pte;
        pte_t *pte;
        swp_entry_t entry;
        int nr;

        tlb_change_page_size(tlb, PAGE_SIZE);
        init_rss_vec(rss);
        start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return addr;

        flush_tlb_batched_pending(mm);
        arch_enter_lazy_mmu_mode();
        do {
                pte_t ptent = ptep_get(pte);
                struct folio *folio;
                struct page *page;
                int max_nr;

                nr = 1;
                if (pte_none(ptent))
                        continue;

                if (need_resched())
                        break;

                if (pte_present(ptent)) {
                        max_nr = (end - addr) / PAGE_SIZE;
                        nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
                                              addr, details, rss, &force_flush,
                                              &force_break);
                        if (unlikely(force_break)) {
                                addr += nr * PAGE_SIZE;
                                break;
                        }
                        continue;
                }

                entry = pte_to_swp_entry(ptent);
                if (is_device_private_entry(entry) ||
                    is_device_exclusive_entry(entry)) {
                        page = pfn_swap_entry_to_page(entry);
                        folio = page_folio(page);
                        if (unlikely(!should_zap_folio(details, folio)))
                                continue;
                        /*
                         * Both device private/exclusive mappings should only
                         * work with anonymous page so far, so we don't need to
                         * consider uffd-wp bit when zap. For more information,
                         * see zap_install_uffd_wp_if_needed().
                         */
                        WARN_ON_ONCE(!vma_is_anonymous(vma));
                        rss[mm_counter(folio)]--;
                        if (is_device_private_entry(entry))
                                folio_remove_rmap_pte(folio, page, vma);
                        folio_put(folio);
                } else if (!non_swap_entry(entry)) {
                        max_nr = (end - addr) / PAGE_SIZE;
                        nr = swap_pte_batch(pte, max_nr, ptent);
                        /* Genuine swap entries, hence private anon pages */
                        if (!should_zap_cows(details))
                                continue;
                        rss[MM_SWAPENTS] -= nr;
                        free_swap_and_cache_nr(entry, nr);
                } else if (is_migration_entry(entry)) {
                        folio = pfn_swap_entry_folio(entry);
                        if (!should_zap_folio(details, folio))
                                continue;
                        rss[mm_counter(folio)]--;
                } else if (pte_marker_entry_uffd_wp(entry)) {
                        /*
                         * For anon: always drop the marker; for file: only
                         * drop the marker if explicitly requested.
                         */
                        if (!vma_is_anonymous(vma) &&
                            !zap_drop_file_uffd_wp(details))
                                continue;
                } else if (is_hwpoison_entry(entry) ||
                           is_poisoned_swp_entry(entry)) {
                        if (!should_zap_cows(details))
                                continue;
                } else {
                        /* We should have covered all the swap entry types */
                        pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
                        WARN_ON_ONCE(1);
                }
                clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
                zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
        } while (pte += nr, addr += PAGE_SIZE * nr, addr != end);

        add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();

        /* Do the actual TLB flush before dropping ptl */
        if (force_flush) {
                tlb_flush_mmu_tlbonly(tlb);
                tlb_flush_rmaps(tlb, vma);
        }
        pte_unmap_unlock(start_pte, ptl);

        /*
         * If we forced a TLB flush (either due to running out of
         * batch buffers or because we needed to flush dirty TLB
         * entries before releasing the ptl), free the batched
         * memory too. Come back again if we didn't do everything.
         */
        if (force_flush)
                tlb_flush_mmu(tlb);

        return addr;
}
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
                        else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
                                addr = next;
                                continue;
                        }
                        /* fall through */
                } else if (details && details->single_folio &&
                           folio_test_pmd_mappable(details->single_folio) &&
                           next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
                        spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
                        /*
                         * Take and drop THP pmd lock so that we cannot return
                         * prematurely, while zap_huge_pmd() has cleared *pmd,
                         * but not yet decremented compound_mapcount().
                         */
                        spin_unlock(ptl);
                }
                if (pmd_none(*pmd)) {
                        addr = next;
                        continue;
                }
                addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
                if (addr != next)
                        pmd--;
        } while (pmd++, cond_resched(), addr != end);

        return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, p4d_t *p4d,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
                        if (next - addr != HPAGE_PUD_SIZE) {
                                mmap_assert_locked(tlb->mm);
                                split_huge_pud(vma, pud, addr);
                        } else if (zap_huge_pud(tlb, vma, pud, addr))
                                goto next;
                        /* fall through */
                }
                if (pud_none_or_clear_bad(pud))
                        continue;
                next = zap_pmd_range(tlb, vma, pud, addr, next, details);
next:
                cond_resched();
        } while (pud++, addr = next, addr != end);

        return addr;
}
static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
{
        p4d_t *p4d;
        unsigned long next;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                next = zap_pud_range(tlb, vma, p4d, addr, next, details);
        } while (p4d++, addr = next, addr != end);

        return addr;
}

void unmap_page_range(struct mmu_gather *tlb,
                      struct vm_area_struct *vma,
                      unsigned long addr, unsigned long end,
                      struct zap_details *details)
{
        pgd_t *pgd;
        unsigned long next;

        BUG_ON(addr >= end);
        tlb_start_vma(tlb, vma);
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
        } while (pgd++, addr = next, addr != end);
        tlb_end_vma(tlb, vma);
}


static void unmap_single_vma(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long start_addr,
                unsigned long end_addr,
                struct zap_details *details, bool mm_wr_locked)
{
        unsigned long start = max(vma->vm_start, start_addr);
        unsigned long end;

        if (start >= vma->vm_end)
                return;
        end = min(vma->vm_end, end_addr);
        if (end <= vma->vm_start)
                return;

        if (vma->vm_file)
                uprobe_munmap(vma, start, end);

        if (unlikely(vma->vm_flags & VM_PFNMAP))
                untrack_pfn(vma, 0, 0, mm_wr_locked);

        if (start != end) {
                if (unlikely(is_vm_hugetlb_page(vma))) {
                        /*
                         * It is undesirable to test vma->vm_file as it
                         * should be non-null for a valid hugetlb area.
                         * However, vm_file will be NULL in the error
                         * cleanup path of mmap_region. When the
                         * hugetlbfs ->mmap method fails,
                         * mmap_region() nullifies vma->vm_file
                         * before calling this function to clean up.
                         * Since no pte has actually been setup, it is
                         * safe to do nothing in this case.
                         */
                        if (vma->vm_file) {
                                zap_flags_t zap_flags = details ?
                                        details->zap_flags : 0;
                                __unmap_hugepage_range(tlb, vma, start, end,
                                                       NULL, zap_flags);
                        }
                } else
                        unmap_page_range(tlb, vma, start, end, details);
        }
}
/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlb: address of the caller's struct mmu_gather
 * @mas: the maple state
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @tree_end: the maximum index to check
 * @mm_wr_locked: whether the mmap_lock is held for writing
 *
 * Unmap all pages in the vma list.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
                struct vm_area_struct *vma, unsigned long start_addr,
                unsigned long end_addr, unsigned long tree_end,
                bool mm_wr_locked)
{
        struct mmu_notifier_range range;
        struct zap_details details = {
                .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
                /* Careful - we need to zap private pages too! */
                .even_cows = true,
        };

        mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
                                start_addr, end_addr);
        mmu_notifier_invalidate_range_start(&range);
        do {
                unsigned long start = start_addr;
                unsigned long end = end_addr;
                hugetlb_zap_begin(vma, &start, &end);
                unmap_single_vma(tlb, vma, start, end, &details,
                                 mm_wr_locked);
                hugetlb_zap_end(vma, &details);
                vma = mas_find(mas, tree_end - 1);
        } while (vma && likely(!xa_is_zero(vma)));
        mmu_notifier_invalidate_range_end(&range);
}

/**
 * zap_page_range_single - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of shared cache invalidation
 *
 * The range must fit into one VMA.
 */
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
                           unsigned long size, struct zap_details *details)
{
        const unsigned long end = address + size;
        struct mmu_notifier_range range;
        struct mmu_gather tlb;

        lru_add_drain();
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                address, end);
        hugetlb_zap_begin(vma, &range.start, &range.end);
        tlb_gather_mmu(&tlb, vma->vm_mm);
        update_hiwater_rss(vma->vm_mm);
        mmu_notifier_invalidate_range_start(&range);
        /*
         * unmap 'address-end' not 'range.start-range.end' as range
         * could have been expanded for hugetlb pmd sharing.
         */
        unmap_single_vma(&tlb, vma, address, end, details, false);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
        hugetlb_zap_end(vma, details);
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 */
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                  unsigned long size)
{
        if (!range_in_vma(vma, address, address + size) ||
            !(vma->vm_flags & VM_PFNMAP))
                return;

        zap_page_range_single(vma, address, size, NULL);
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
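/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver that set up a VM_PFNMAP range with remap_pfn_range() can tear
 * the ptes down before releasing the backing resource:
 *
 *      static void mydrv_teardown(struct vm_area_struct *vma)
 *      {
 *              zap_vma_ptes(vma, vma->vm_start,
 *                           vma->vm_end - vma->vm_start);
 *      }
 *
 * The range must lie entirely inside the vma and the vma must be
 * VM_PFNMAP; otherwise zap_vma_ptes() silently does nothing.
 */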
1933 * 1934 * The entire address range must be fully contained within the vma. 1935 * 1936 */ 1937 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1938 unsigned long size) 1939 { 1940 if (!range_in_vma(vma, address, address + size) || 1941 !(vma->vm_flags & VM_PFNMAP)) 1942 return; 1943 1944 zap_page_range_single(vma, address, size, NULL); 1945 } 1946 EXPORT_SYMBOL_GPL(zap_vma_ptes); 1947 1948 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) 1949 { 1950 pgd_t *pgd; 1951 p4d_t *p4d; 1952 pud_t *pud; 1953 pmd_t *pmd; 1954 1955 pgd = pgd_offset(mm, addr); 1956 p4d = p4d_alloc(mm, pgd, addr); 1957 if (!p4d) 1958 return NULL; 1959 pud = pud_alloc(mm, p4d, addr); 1960 if (!pud) 1961 return NULL; 1962 pmd = pmd_alloc(mm, pud, addr); 1963 if (!pmd) 1964 return NULL; 1965 1966 VM_BUG_ON(pmd_trans_huge(*pmd)); 1967 return pmd; 1968 } 1969 1970 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 1971 spinlock_t **ptl) 1972 { 1973 pmd_t *pmd = walk_to_pmd(mm, addr); 1974 1975 if (!pmd) 1976 return NULL; 1977 return pte_alloc_map_lock(mm, pmd, addr, ptl); 1978 } 1979 1980 static int validate_page_before_insert(struct page *page) 1981 { 1982 struct folio *folio = page_folio(page); 1983 1984 if (folio_test_anon(folio) || folio_test_slab(folio) || 1985 page_has_type(page)) 1986 return -EINVAL; 1987 flush_dcache_folio(folio); 1988 return 0; 1989 } 1990 1991 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, 1992 unsigned long addr, struct page *page, pgprot_t prot) 1993 { 1994 struct folio *folio = page_folio(page); 1995 1996 if (!pte_none(ptep_get(pte))) 1997 return -EBUSY; 1998 /* Ok, finally just insert the thing.. */ 1999 folio_get(folio); 2000 inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); 2001 folio_add_file_rmap_pte(folio, page, vma); 2002 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); 2003 return 0; 2004 } 2005 2006 /* 2007 * This is the old fallback for page remapping. 2008 * 2009 * For historical reasons, it only allows reserved pages. Only 2010 * old drivers should use this, and they needed to mark their 2011 * pages reserved for the old functions anyway. 2012 */ 2013 static int insert_page(struct vm_area_struct *vma, unsigned long addr, 2014 struct page *page, pgprot_t prot) 2015 { 2016 int retval; 2017 pte_t *pte; 2018 spinlock_t *ptl; 2019 2020 retval = validate_page_before_insert(page); 2021 if (retval) 2022 goto out; 2023 retval = -ENOMEM; 2024 pte = get_locked_pte(vma->vm_mm, addr, &ptl); 2025 if (!pte) 2026 goto out; 2027 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); 2028 pte_unmap_unlock(pte, ptl); 2029 out: 2030 return retval; 2031 } 2032 2033 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, 2034 unsigned long addr, struct page *page, pgprot_t prot) 2035 { 2036 int err; 2037 2038 if (!page_count(page)) 2039 return -EINVAL; 2040 err = validate_page_before_insert(page); 2041 if (err) 2042 return err; 2043 return insert_page_into_pte_locked(vma, pte, addr, page, prot); 2044 } 2045 2046 /* insert_pages() amortizes the cost of spinlock operations 2047 * when inserting pages in a loop. 
2048 */ 2049 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, 2050 struct page **pages, unsigned long *num, pgprot_t prot) 2051 { 2052 pmd_t *pmd = NULL; 2053 pte_t *start_pte, *pte; 2054 spinlock_t *pte_lock; 2055 struct mm_struct *const mm = vma->vm_mm; 2056 unsigned long curr_page_idx = 0; 2057 unsigned long remaining_pages_total = *num; 2058 unsigned long pages_to_write_in_pmd; 2059 int ret; 2060 more: 2061 ret = -EFAULT; 2062 pmd = walk_to_pmd(mm, addr); 2063 if (!pmd) 2064 goto out; 2065 2066 pages_to_write_in_pmd = min_t(unsigned long, 2067 remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); 2068 2069 /* Allocate the PTE if necessary; takes PMD lock once only. */ 2070 ret = -ENOMEM; 2071 if (pte_alloc(mm, pmd)) 2072 goto out; 2073 2074 while (pages_to_write_in_pmd) { 2075 int pte_idx = 0; 2076 const int batch_size = min_t(int, pages_to_write_in_pmd, 8); 2077 2078 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); 2079 if (!start_pte) { 2080 ret = -EFAULT; 2081 goto out; 2082 } 2083 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { 2084 int err = insert_page_in_batch_locked(vma, pte, 2085 addr, pages[curr_page_idx], prot); 2086 if (unlikely(err)) { 2087 pte_unmap_unlock(start_pte, pte_lock); 2088 ret = err; 2089 remaining_pages_total -= pte_idx; 2090 goto out; 2091 } 2092 addr += PAGE_SIZE; 2093 ++curr_page_idx; 2094 } 2095 pte_unmap_unlock(start_pte, pte_lock); 2096 pages_to_write_in_pmd -= batch_size; 2097 remaining_pages_total -= batch_size; 2098 } 2099 if (remaining_pages_total) 2100 goto more; 2101 ret = 0; 2102 out: 2103 *num = remaining_pages_total; 2104 return ret; 2105 } 2106 2107 /** 2108 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. 2109 * @vma: user vma to map to 2110 * @addr: target start user address of these pages 2111 * @pages: source kernel pages 2112 * @num: in: number of pages to map. out: number of pages that were *not* 2113 * mapped. (0 means all pages were successfully mapped). 2114 * 2115 * Preferred over vm_insert_page() when inserting multiple pages. 2116 * 2117 * In case of error, we may have mapped a subset of the provided 2118 * pages. It is the caller's responsibility to account for this case. 2119 * 2120 * The same restrictions apply as in vm_insert_page(). 2121 */ 2122 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 2123 struct page **pages, unsigned long *num) 2124 { 2125 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; 2126 2127 if (addr < vma->vm_start || end_addr >= vma->vm_end) 2128 return -EFAULT; 2129 if (!(vma->vm_flags & VM_MIXEDMAP)) { 2130 BUG_ON(mmap_read_trylock(vma->vm_mm)); 2131 BUG_ON(vma->vm_flags & VM_PFNMAP); 2132 vm_flags_set(vma, VM_MIXEDMAP); 2133 } 2134 /* Defer page refcount checking till we're about to map that page. */ 2135 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); 2136 } 2137 EXPORT_SYMBOL(vm_insert_pages); 2138 2139 /** 2140 * vm_insert_page - insert single page into user vma 2141 * @vma: user vma to map to 2142 * @addr: target user address of this page 2143 * @page: source kernel page 2144 * 2145 * This allows drivers to insert individual pages they've allocated 2146 * into a user vma. 2147 * 2148 * The page has to be a nice clean _individual_ kernel allocation. 2149 * If you allocate a compound page, you need to have marked it as 2150 * such (__GFP_COMP), or manually just split the page up yourself 2151 * (see split_page()). 2152 * 2153 * NOTE! 
Traditionally this was done with "remap_pfn_range()" which
2154 * took an arbitrary page protection parameter. This doesn't allow
2155 * that. Your vma protection will have to be set up correctly, which
2156 * means that if you want a shared writable mapping, you'd better
2157 * ask for a shared writable mapping!
2158 *
2159 * The page does not need to be reserved.
2160 *
2161 * Usually this function is called from the f_op->mmap() handler
2162 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2163 * Caller must set VM_MIXEDMAP on the vma if it wants to call this
2164 * function from other places, for example from the page-fault handler.
2165 *
2166 * Return: %0 on success, negative error code otherwise.
2167 */
2168 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2169 struct page *page)
2170 {
2171 if (addr < vma->vm_start || addr >= vma->vm_end)
2172 return -EFAULT;
2173 if (!page_count(page))
2174 return -EINVAL;
2175 if (!(vma->vm_flags & VM_MIXEDMAP)) {
2176 BUG_ON(mmap_read_trylock(vma->vm_mm));
2177 BUG_ON(vma->vm_flags & VM_PFNMAP);
2178 vm_flags_set(vma, VM_MIXEDMAP);
2179 }
2180 return insert_page(vma, addr, page, vma->vm_page_prot);
2181 }
2182 EXPORT_SYMBOL(vm_insert_page);
2183
2184 /*
2185 * __vm_map_pages - map a range of kernel pages into a user vma
2186 * @vma: user vma to map to
2187 * @pages: pointer to array of source kernel pages
2188 * @num: number of pages in page array
2189 * @offset: user's requested vm_pgoff
2190 *
2191 * This allows drivers to map a range of kernel pages into a user vma.
2192 *
2193 * Return: 0 on success and error code otherwise.
2194 */
2195 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2196 unsigned long num, unsigned long offset)
2197 {
2198 unsigned long count = vma_pages(vma);
2199 unsigned long uaddr = vma->vm_start;
2200 int ret, i;
2201
2202 /* Fail if the user requested offset is beyond the end of the object */
2203 if (offset >= num)
2204 return -ENXIO;
2205
2206 /* Fail if the user requested size exceeds available object size */
2207 if (count > num - offset)
2208 return -ENXIO;
2209
2210 for (i = 0; i < count; i++) {
2211 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2212 if (ret < 0)
2213 return ret;
2214 uaddr += PAGE_SIZE;
2215 }
2216
2217 return 0;
2218 }
2219
2220 /**
2221 * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2222 * @vma: user vma to map to
2223 * @pages: pointer to array of source kernel pages
2224 * @num: number of pages in page array
2225 *
2226 * Maps an object consisting of @num pages, catering for the user's
2227 * requested vm_pgoff.
2228 *
2229 * If we fail to insert any page into the vma, the function will return
2230 * immediately leaving any previously inserted pages present. Callers
2231 * from the mmap handler may immediately return the error as their caller
2232 * will destroy the vma, removing any successfully inserted pages. Other
2233 * callers should make their own arrangements for calling unmap_region().
2234 *
2235 * Context: Process context. Called by mmap handlers.
2236 * Return: 0 on success and error code otherwise.
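 *
 * A minimal illustrative sketch (not part of this file): a driver whose
 * object is an array of already-allocated pages can implement its
 * ->mmap() handler as
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_buf *buf = file->private_data;
 *
 *		return vm_map_pages(vma, buf->pages, buf->nr_pages);
 *	}
 *
 * where "my_buf", "pages" and "nr_pages" stand for hypothetical driver
 * state; the vm_pgoff requested by userspace is honoured by
 * vm_map_pages() itself.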
*/
2238 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2239 unsigned long num)
2240 {
2241 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2242 }
2243 EXPORT_SYMBOL(vm_map_pages);
2244
2245 /**
2246 * vm_map_pages_zero - map a range of kernel pages starting at offset 0
2247 * @vma: user vma to map to
2248 * @pages: pointer to array of source kernel pages
2249 * @num: number of pages in page array
2250 *
2251 * Similar to vm_map_pages(), except that it explicitly sets the offset
2252 * to 0. This function is intended for drivers that do not consider
2253 * vm_pgoff.
2254 *
2255 * Context: Process context. Called by mmap handlers.
2256 * Return: 0 on success and error code otherwise.
2257 */
2258 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2259 unsigned long num)
2260 {
2261 return __vm_map_pages(vma, pages, num, 0);
2262 }
2263 EXPORT_SYMBOL(vm_map_pages_zero);
2264
2265 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2266 pfn_t pfn, pgprot_t prot, bool mkwrite)
2267 {
2268 struct mm_struct *mm = vma->vm_mm;
2269 pte_t *pte, entry;
2270 spinlock_t *ptl;
2271
2272 pte = get_locked_pte(mm, addr, &ptl);
2273 if (!pte)
2274 return VM_FAULT_OOM;
2275 entry = ptep_get(pte);
2276 if (!pte_none(entry)) {
2277 if (mkwrite) {
2278 /*
2279 * For read faults on private mappings the PFN passed
2280 * in may not match the PFN we have mapped if the
2281 * mapped PFN is a writeable COW page. In the mkwrite
2282 * case we are creating a writable PTE for a shared
2283 * mapping and we expect the PFNs to match. If they
2284 * don't match, we are likely racing with block
2285 * allocation and mapping invalidation so just skip the
2286 * update.
2287 */
2288 if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2289 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2290 goto out_unlock;
2291 }
2292 entry = pte_mkyoung(entry);
2293 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2294 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2295 update_mmu_cache(vma, addr, pte);
2296 }
2297 goto out_unlock;
2298 }
2299
2300 /* Ok, finally just insert the thing.. */
2301 if (pfn_t_devmap(pfn))
2302 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2303 else
2304 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2305
2306 if (mkwrite) {
2307 entry = pte_mkyoung(entry);
2308 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2309 }
2310
2311 set_pte_at(mm, addr, pte, entry);
2312 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2313
2314 out_unlock:
2315 pte_unmap_unlock(pte, ptl);
2316 return VM_FAULT_NOPAGE;
2317 }
2318
2319 /**
2320 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2321 * @vma: user vma to map to
2322 * @addr: target user address of this page
2323 * @pfn: source kernel pfn
2324 * @pgprot: pgprot flags for the inserted page
2325 *
2326 * This is exactly like vmf_insert_pfn(), except that it allows drivers
2327 * to override pgprot on a per-page basis.
2328 *
2329 * This only makes sense for IO mappings, and it makes no sense for
2330 * COW mappings. In general, using multiple vmas is preferable;
2331 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2332 * impractical.
2333 *
2334 * pgprot typically only differs from @vma->vm_page_prot when drivers set
2335 * caching- and encryption bits that differ from those of @vma->vm_page_prot,
2336 * because the caching- or encryption mode may not be known at mmap() time.
2337 * 2338 * This is ok as long as @vma->vm_page_prot is not used by the core vm 2339 * to set caching and encryption bits for those vmas (except for COW pages). 2340 * This is ensured by core vm only modifying these page table entries using 2341 * functions that don't touch caching- or encryption bits, using pte_modify() 2342 * if needed. (See for example mprotect()). 2343 * 2344 * Also when new page-table entries are created, this is only done using the 2345 * fault() callback, and never using the value of vma->vm_page_prot, 2346 * except for page-table entries that point to anonymous pages as the result 2347 * of COW. 2348 * 2349 * Context: Process context. May allocate using %GFP_KERNEL. 2350 * Return: vm_fault_t value. 2351 */ 2352 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2353 unsigned long pfn, pgprot_t pgprot) 2354 { 2355 /* 2356 * Technically, architectures with pte_special can avoid all these 2357 * restrictions (same for remap_pfn_range). However we would like 2358 * consistency in testing and feature parity among all, so we should 2359 * try to keep these invariants in place for everybody. 2360 */ 2361 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 2362 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 2363 (VM_PFNMAP|VM_MIXEDMAP)); 2364 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 2365 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 2366 2367 if (addr < vma->vm_start || addr >= vma->vm_end) 2368 return VM_FAULT_SIGBUS; 2369 2370 if (!pfn_modify_allowed(pfn, pgprot)) 2371 return VM_FAULT_SIGBUS; 2372 2373 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); 2374 2375 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, 2376 false); 2377 } 2378 EXPORT_SYMBOL(vmf_insert_pfn_prot); 2379 2380 /** 2381 * vmf_insert_pfn - insert single pfn into user vma 2382 * @vma: user vma to map to 2383 * @addr: target user address of this page 2384 * @pfn: source kernel pfn 2385 * 2386 * Similar to vm_insert_page, this allows drivers to insert individual pages 2387 * they've allocated into a user vma. Same comments apply. 2388 * 2389 * This function should only be called from a vm_ops->fault handler, and 2390 * in that case the handler should return the result of this function. 2391 * 2392 * vma cannot be a COW mapping. 2393 * 2394 * As this is called only for pages that do not currently exist, we 2395 * do not need to flush old virtual caches or the TLB. 2396 * 2397 * Context: Process context. May allocate using %GFP_KERNEL. 2398 * Return: vm_fault_t value. 
*/
2400 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2401 unsigned long pfn)
2402 {
2403 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2404 }
2405 EXPORT_SYMBOL(vmf_insert_pfn);
2406
2407 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2408 {
2409 /* these checks mirror the abort conditions in vm_normal_page */
2410 if (vma->vm_flags & VM_MIXEDMAP)
2411 return true;
2412 if (pfn_t_devmap(pfn))
2413 return true;
2414 if (pfn_t_special(pfn))
2415 return true;
2416 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2417 return true;
2418 return false;
2419 }
2420
2421 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2422 unsigned long addr, pfn_t pfn, bool mkwrite)
2423 {
2424 pgprot_t pgprot = vma->vm_page_prot;
2425 int err;
2426
2427 BUG_ON(!vm_mixed_ok(vma, pfn));
2428
2429 if (addr < vma->vm_start || addr >= vma->vm_end)
2430 return VM_FAULT_SIGBUS;
2431
2432 track_pfn_insert(vma, &pgprot, pfn);
2433
2434 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2435 return VM_FAULT_SIGBUS;
2436
2437 /*
2438 * If we don't have pte special, then we have to use the pfn_valid()
2439 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2440 * refcount the page if pfn_valid is true (hence insert_page rather
2441 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
2442 * without pte special, it would then be refcounted there as a normal page.
2443 */
2444 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2445 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2446 struct page *page;
2447
2448 /*
2449 * At this point we are committed to insert_page()
2450 * regardless of whether the caller specified flags that
2451 * result in pfn_t_has_page() == false.
2452 */
2453 page = pfn_to_page(pfn_t_to_pfn(pfn));
2454 err = insert_page(vma, addr, page, pgprot);
2455 } else {
2456 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2457 }
2458
2459 if (err == -ENOMEM)
2460 return VM_FAULT_OOM;
2461 if (err < 0 && err != -EBUSY)
2462 return VM_FAULT_SIGBUS;
2463
2464 return VM_FAULT_NOPAGE;
2465 }
2466
2467 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2468 pfn_t pfn)
2469 {
2470 return __vm_insert_mixed(vma, addr, pfn, false);
2471 }
2472 EXPORT_SYMBOL(vmf_insert_mixed);
2473
2474 /*
2475 * If the insertion of the PTE failed because someone else already added a
2476 * different entry in the meantime, we treat that as success as we assume
2477 * the same entry was actually inserted.
2478 */
2479 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2480 unsigned long addr, pfn_t pfn)
2481 {
2482 return __vm_insert_mixed(vma, addr, pfn, true);
2483 }
2484 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2485
2486 /*
2487 * Maps a range of physical memory into the requested pages. The old
2488 * mappings are removed.
Any references to nonexistent pages result
2489 * in null mappings (currently treated as "copy-on-access")
2490 */
2491 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2492 unsigned long addr, unsigned long end,
2493 unsigned long pfn, pgprot_t prot)
2494 {
2495 pte_t *pte, *mapped_pte;
2496 spinlock_t *ptl;
2497 int err = 0;
2498
2499 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2500 if (!pte)
2501 return -ENOMEM;
2502 arch_enter_lazy_mmu_mode();
2503 do {
2504 BUG_ON(!pte_none(ptep_get(pte)));
2505 if (!pfn_modify_allowed(pfn, prot)) {
2506 err = -EACCES;
2507 break;
2508 }
2509 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2510 pfn++;
2511 } while (pte++, addr += PAGE_SIZE, addr != end);
2512 arch_leave_lazy_mmu_mode();
2513 pte_unmap_unlock(mapped_pte, ptl);
2514 return err;
2515 }
2516
2517 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2518 unsigned long addr, unsigned long end,
2519 unsigned long pfn, pgprot_t prot)
2520 {
2521 pmd_t *pmd;
2522 unsigned long next;
2523 int err;
2524
2525 pfn -= addr >> PAGE_SHIFT;
2526 pmd = pmd_alloc(mm, pud, addr);
2527 if (!pmd)
2528 return -ENOMEM;
2529 VM_BUG_ON(pmd_trans_huge(*pmd));
2530 do {
2531 next = pmd_addr_end(addr, end);
2532 err = remap_pte_range(mm, pmd, addr, next,
2533 pfn + (addr >> PAGE_SHIFT), prot);
2534 if (err)
2535 return err;
2536 } while (pmd++, addr = next, addr != end);
2537 return 0;
2538 }
2539
2540 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2541 unsigned long addr, unsigned long end,
2542 unsigned long pfn, pgprot_t prot)
2543 {
2544 pud_t *pud;
2545 unsigned long next;
2546 int err;
2547
2548 pfn -= addr >> PAGE_SHIFT;
2549 pud = pud_alloc(mm, p4d, addr);
2550 if (!pud)
2551 return -ENOMEM;
2552 do {
2553 next = pud_addr_end(addr, end);
2554 err = remap_pmd_range(mm, pud, addr, next,
2555 pfn + (addr >> PAGE_SHIFT), prot);
2556 if (err)
2557 return err;
2558 } while (pud++, addr = next, addr != end);
2559 return 0;
2560 }
2561
2562 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2563 unsigned long addr, unsigned long end,
2564 unsigned long pfn, pgprot_t prot)
2565 {
2566 p4d_t *p4d;
2567 unsigned long next;
2568 int err;
2569
2570 pfn -= addr >> PAGE_SHIFT;
2571 p4d = p4d_alloc(mm, pgd, addr);
2572 if (!p4d)
2573 return -ENOMEM;
2574 do {
2575 next = p4d_addr_end(addr, end);
2576 err = remap_pud_range(mm, p4d, addr, next,
2577 pfn + (addr >> PAGE_SHIFT), prot);
2578 if (err)
2579 return err;
2580 } while (p4d++, addr = next, addr != end);
2581 return 0;
2582 }
2583
2584 /*
2585 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
2586 * must have pre-validated the caching bits of the pgprot_t.
2587 */
2588 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2589 unsigned long pfn, unsigned long size, pgprot_t prot)
2590 {
2591 pgd_t *pgd;
2592 unsigned long next;
2593 unsigned long end = addr + PAGE_ALIGN(size);
2594 struct mm_struct *mm = vma->vm_mm;
2595 int err;
2596
2597 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2598 return -EINVAL;
2599
2600 /*
2601 * Physically remapped pages are special. Tell the
2602 * rest of the world about it:
2603 * VM_IO tells people not to look at these pages
2604 * (accesses can have side effects).
2605 * VM_PFNMAP tells the core MM that the base pages are just
2606 * raw PFN mappings, and do not have a "struct page" associated
2607 * with them.
2608 * VM_DONTEXPAND
2609 * Disable vma merging and expanding with mremap().
2610 * VM_DONTDUMP 2611 * Omit vma from core dump, even when VM_IO turned off. 2612 * 2613 * There's a horrible special case to handle copy-on-write 2614 * behaviour that some programs depend on. We mark the "original" 2615 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2616 * See vm_normal_page() for details. 2617 */ 2618 if (is_cow_mapping(vma->vm_flags)) { 2619 if (addr != vma->vm_start || end != vma->vm_end) 2620 return -EINVAL; 2621 vma->vm_pgoff = pfn; 2622 } 2623 2624 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); 2625 2626 BUG_ON(addr >= end); 2627 pfn -= addr >> PAGE_SHIFT; 2628 pgd = pgd_offset(mm, addr); 2629 flush_cache_range(vma, addr, end); 2630 do { 2631 next = pgd_addr_end(addr, end); 2632 err = remap_p4d_range(mm, pgd, addr, next, 2633 pfn + (addr >> PAGE_SHIFT), prot); 2634 if (err) 2635 return err; 2636 } while (pgd++, addr = next, addr != end); 2637 2638 return 0; 2639 } 2640 2641 /** 2642 * remap_pfn_range - remap kernel memory to userspace 2643 * @vma: user vma to map to 2644 * @addr: target page aligned user address to start at 2645 * @pfn: page frame number of kernel physical memory address 2646 * @size: size of mapping area 2647 * @prot: page protection flags for this mapping 2648 * 2649 * Note: this is only safe if the mm semaphore is held when called. 2650 * 2651 * Return: %0 on success, negative error code otherwise. 2652 */ 2653 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 2654 unsigned long pfn, unsigned long size, pgprot_t prot) 2655 { 2656 int err; 2657 2658 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); 2659 if (err) 2660 return -EINVAL; 2661 2662 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); 2663 if (err) 2664 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); 2665 return err; 2666 } 2667 EXPORT_SYMBOL(remap_pfn_range); 2668 2669 /** 2670 * vm_iomap_memory - remap memory to userspace 2671 * @vma: user vma to map to 2672 * @start: start of the physical memory to be mapped 2673 * @len: size of area 2674 * 2675 * This is a simplified io_remap_pfn_range() for common driver use. The 2676 * driver just needs to give us the physical memory range to be mapped, 2677 * we'll figure out the rest from the vma information. 2678 * 2679 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get 2680 * whatever write-combining details or similar. 2681 * 2682 * Return: %0 on success, negative error code otherwise. 2683 */ 2684 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 2685 { 2686 unsigned long vm_len, pfn, pages; 2687 2688 /* Check that the physical memory area passed in looks valid */ 2689 if (start + len < start) 2690 return -EINVAL; 2691 /* 2692 * You *really* shouldn't map things that aren't page-aligned, 2693 * but we've historically allowed it because IO memory might 2694 * just have smaller alignment. 2695 */ 2696 len += start & ~PAGE_MASK; 2697 pfn = start >> PAGE_SHIFT; 2698 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; 2699 if (pfn + pages < pfn) 2700 return -EINVAL; 2701 2702 /* We start the mapping 'vm_pgoff' pages into the area */ 2703 if (vma->vm_pgoff > pages) 2704 return -EINVAL; 2705 pfn += vma->vm_pgoff; 2706 pages -= vma->vm_pgoff; 2707 2708 /* Can we fit all of the mapping? 
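 * The vma may cover less of the physical area than what remains after
 * applying vm_pgoff, but it must not cover more.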
*/ 2709 vm_len = vma->vm_end - vma->vm_start; 2710 if (vm_len >> PAGE_SHIFT > pages) 2711 return -EINVAL; 2712 2713 /* Ok, let it rip */ 2714 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 2715 } 2716 EXPORT_SYMBOL(vm_iomap_memory); 2717 2718 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 2719 unsigned long addr, unsigned long end, 2720 pte_fn_t fn, void *data, bool create, 2721 pgtbl_mod_mask *mask) 2722 { 2723 pte_t *pte, *mapped_pte; 2724 int err = 0; 2725 spinlock_t *ptl; 2726 2727 if (create) { 2728 mapped_pte = pte = (mm == &init_mm) ? 2729 pte_alloc_kernel_track(pmd, addr, mask) : 2730 pte_alloc_map_lock(mm, pmd, addr, &ptl); 2731 if (!pte) 2732 return -ENOMEM; 2733 } else { 2734 mapped_pte = pte = (mm == &init_mm) ? 2735 pte_offset_kernel(pmd, addr) : 2736 pte_offset_map_lock(mm, pmd, addr, &ptl); 2737 if (!pte) 2738 return -EINVAL; 2739 } 2740 2741 arch_enter_lazy_mmu_mode(); 2742 2743 if (fn) { 2744 do { 2745 if (create || !pte_none(ptep_get(pte))) { 2746 err = fn(pte++, addr, data); 2747 if (err) 2748 break; 2749 } 2750 } while (addr += PAGE_SIZE, addr != end); 2751 } 2752 *mask |= PGTBL_PTE_MODIFIED; 2753 2754 arch_leave_lazy_mmu_mode(); 2755 2756 if (mm != &init_mm) 2757 pte_unmap_unlock(mapped_pte, ptl); 2758 return err; 2759 } 2760 2761 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 2762 unsigned long addr, unsigned long end, 2763 pte_fn_t fn, void *data, bool create, 2764 pgtbl_mod_mask *mask) 2765 { 2766 pmd_t *pmd; 2767 unsigned long next; 2768 int err = 0; 2769 2770 BUG_ON(pud_leaf(*pud)); 2771 2772 if (create) { 2773 pmd = pmd_alloc_track(mm, pud, addr, mask); 2774 if (!pmd) 2775 return -ENOMEM; 2776 } else { 2777 pmd = pmd_offset(pud, addr); 2778 } 2779 do { 2780 next = pmd_addr_end(addr, end); 2781 if (pmd_none(*pmd) && !create) 2782 continue; 2783 if (WARN_ON_ONCE(pmd_leaf(*pmd))) 2784 return -EINVAL; 2785 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { 2786 if (!create) 2787 continue; 2788 pmd_clear_bad(pmd); 2789 } 2790 err = apply_to_pte_range(mm, pmd, addr, next, 2791 fn, data, create, mask); 2792 if (err) 2793 break; 2794 } while (pmd++, addr = next, addr != end); 2795 2796 return err; 2797 } 2798 2799 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, 2800 unsigned long addr, unsigned long end, 2801 pte_fn_t fn, void *data, bool create, 2802 pgtbl_mod_mask *mask) 2803 { 2804 pud_t *pud; 2805 unsigned long next; 2806 int err = 0; 2807 2808 if (create) { 2809 pud = pud_alloc_track(mm, p4d, addr, mask); 2810 if (!pud) 2811 return -ENOMEM; 2812 } else { 2813 pud = pud_offset(p4d, addr); 2814 } 2815 do { 2816 next = pud_addr_end(addr, end); 2817 if (pud_none(*pud) && !create) 2818 continue; 2819 if (WARN_ON_ONCE(pud_leaf(*pud))) 2820 return -EINVAL; 2821 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) { 2822 if (!create) 2823 continue; 2824 pud_clear_bad(pud); 2825 } 2826 err = apply_to_pmd_range(mm, pud, addr, next, 2827 fn, data, create, mask); 2828 if (err) 2829 break; 2830 } while (pud++, addr = next, addr != end); 2831 2832 return err; 2833 } 2834 2835 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2836 unsigned long addr, unsigned long end, 2837 pte_fn_t fn, void *data, bool create, 2838 pgtbl_mod_mask *mask) 2839 { 2840 p4d_t *p4d; 2841 unsigned long next; 2842 int err = 0; 2843 2844 if (create) { 2845 p4d = p4d_alloc_track(mm, pgd, addr, mask); 2846 if (!p4d) 2847 return -ENOMEM; 2848 } else { 2849 p4d = p4d_offset(pgd, addr); 2850 } 2851 do { 2852 next 
= p4d_addr_end(addr, end); 2853 if (p4d_none(*p4d) && !create) 2854 continue; 2855 if (WARN_ON_ONCE(p4d_leaf(*p4d))) 2856 return -EINVAL; 2857 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) { 2858 if (!create) 2859 continue; 2860 p4d_clear_bad(p4d); 2861 } 2862 err = apply_to_pud_range(mm, p4d, addr, next, 2863 fn, data, create, mask); 2864 if (err) 2865 break; 2866 } while (p4d++, addr = next, addr != end); 2867 2868 return err; 2869 } 2870 2871 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2872 unsigned long size, pte_fn_t fn, 2873 void *data, bool create) 2874 { 2875 pgd_t *pgd; 2876 unsigned long start = addr, next; 2877 unsigned long end = addr + size; 2878 pgtbl_mod_mask mask = 0; 2879 int err = 0; 2880 2881 if (WARN_ON(addr >= end)) 2882 return -EINVAL; 2883 2884 pgd = pgd_offset(mm, addr); 2885 do { 2886 next = pgd_addr_end(addr, end); 2887 if (pgd_none(*pgd) && !create) 2888 continue; 2889 if (WARN_ON_ONCE(pgd_leaf(*pgd))) 2890 return -EINVAL; 2891 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { 2892 if (!create) 2893 continue; 2894 pgd_clear_bad(pgd); 2895 } 2896 err = apply_to_p4d_range(mm, pgd, addr, next, 2897 fn, data, create, &mask); 2898 if (err) 2899 break; 2900 } while (pgd++, addr = next, addr != end); 2901 2902 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 2903 arch_sync_kernel_mappings(start, start + size); 2904 2905 return err; 2906 } 2907 2908 /* 2909 * Scan a region of virtual memory, filling in page tables as necessary 2910 * and calling a provided function on each leaf page table. 2911 */ 2912 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2913 unsigned long size, pte_fn_t fn, void *data) 2914 { 2915 return __apply_to_page_range(mm, addr, size, fn, data, true); 2916 } 2917 EXPORT_SYMBOL_GPL(apply_to_page_range); 2918 2919 /* 2920 * Scan a region of virtual memory, calling a provided function on 2921 * each leaf page table where it exists. 2922 * 2923 * Unlike apply_to_page_range, this does _not_ fill in page tables 2924 * where they are absent. 2925 */ 2926 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, 2927 unsigned long size, pte_fn_t fn, void *data) 2928 { 2929 return __apply_to_page_range(mm, addr, size, fn, data, false); 2930 } 2931 EXPORT_SYMBOL_GPL(apply_to_existing_page_range); 2932 2933 /* 2934 * handle_pte_fault chooses page fault handler according to an entry which was 2935 * read non-atomically. Before making any commitment, on those architectures 2936 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched 2937 * parts, do_swap_page must check under lock before unmapping the pte and 2938 * proceeding (but do_wp_page is only called after already making such a check; 2939 * and do_anonymous_page can safely check later on). 
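 *
 * In other words, pte_unmap_same() below re-reads the pte under its
 * spinlock and reports whether it still matches the value the fault
 * handler started out from.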
2940 */
2941 static inline int pte_unmap_same(struct vm_fault *vmf)
2942 {
2943 int same = 1;
2944 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2945 if (sizeof(pte_t) > sizeof(unsigned long)) {
2946 spin_lock(vmf->ptl);
2947 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
2948 spin_unlock(vmf->ptl);
2949 }
2950 #endif
2951 pte_unmap(vmf->pte);
2952 vmf->pte = NULL;
2953 return same;
2954 }
2955
2956 /*
2957 * Return:
2958 * 0: copy succeeded
2959 * -EHWPOISON: copy failed due to hwpoison in source page
2960 * -EAGAIN: copy failed (some other reason)
2961 */
2962 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
2963 struct vm_fault *vmf)
2964 {
2965 int ret;
2966 void *kaddr;
2967 void __user *uaddr;
2968 struct vm_area_struct *vma = vmf->vma;
2969 struct mm_struct *mm = vma->vm_mm;
2970 unsigned long addr = vmf->address;
2971
2972 if (likely(src)) {
2973 if (copy_mc_user_highpage(dst, src, addr, vma)) {
2974 memory_failure_queue(page_to_pfn(src), 0);
2975 return -EHWPOISON;
2976 }
2977 return 0;
2978 }
2979
2980 /*
2981 * If the source page was a PFN mapping, we don't have
2982 * a "struct page" for it. We do a best-effort copy by
2983 * just copying from the original user address. If that
2984 * fails, we just zero-fill it. Live with it.
2985 */
2986 kaddr = kmap_local_page(dst);
2987 pagefault_disable();
2988 uaddr = (void __user *)(addr & PAGE_MASK);
2989
2990 /*
2991 * On architectures with software "accessed" bits, we would
2992 * take a double page fault, so mark it accessed here.
2993 */
2994 vmf->pte = NULL;
2995 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
2996 pte_t entry;
2997
2998 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2999 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3000 /*
3001 * Another thread has already handled the fault;
3002 * just update the local TLB.
3003 */
3004 if (vmf->pte)
3005 update_mmu_tlb(vma, addr, vmf->pte);
3006 ret = -EAGAIN;
3007 goto pte_unlock;
3008 }
3009
3010 entry = pte_mkyoung(vmf->orig_pte);
3011 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3012 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3013 }
3014
3015 /*
3016 * This really shouldn't fail, because the page is there
3017 * in the page tables. But it might just be unreadable,
3018 * in which case we just give up and fill the result with
3019 * zeroes.
3020 */
3021 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3022 if (vmf->pte)
3023 goto warn;
3024
3025 /* Re-validate under PTL if the page is still mapped */
3026 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3027 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3028 /* The PTE changed under us, update the local TLB */
3029 if (vmf->pte)
3030 update_mmu_tlb(vma, addr, vmf->pte);
3031 ret = -EAGAIN;
3032 goto pte_unlock;
3033 }
3034
3035 /*
3036 * The same page may have been mapped back in since the last
3037 * copy attempt. Try to copy again under the PTL.
3038 */
3039 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3040 /*
3041 * Warn in case some obscure use case
3042 * hits this path.
3043 */
3044 warn:
3045 WARN_ON_ONCE(1);
3046 clear_page(kaddr);
3047 }
3048 }
3049
3050 ret = 0;
3051
3052 pte_unlock:
3053 if (vmf->pte)
3054 pte_unmap_unlock(vmf->pte, vmf->ptl);
3055 pagefault_enable();
3056 kunmap_local(kaddr);
3057 flush_dcache_page(dst);
3058
3059 return ret;
3060 }
3061
3062 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3063 {
3064 struct file *vm_file = vma->vm_file;
3065
3066 if (vm_file)
3067 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3068
3069 /*
3070 * Special mappings (e.g. VDSO) do not have any file so fake
3071 * a default GFP_KERNEL for them.
3072 */
3073 return GFP_KERNEL;
3074 }
3075
3076 /*
3077 * Notify the address space that the page is about to become writable so that
3078 * it can prohibit this or wait for the page to get into an appropriate state.
3079 *
3080 * We do this without the lock held, so that it can sleep if it needs to.
3081 */
3082 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3083 {
3084 vm_fault_t ret;
3085 unsigned int old_flags = vmf->flags;
3086
3087 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3088
3089 if (vmf->vma->vm_file &&
3090 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3091 return VM_FAULT_SIGBUS;
3092
3093 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3094 /* Restore original flags so that caller is not surprised */
3095 vmf->flags = old_flags;
3096 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3097 return ret;
3098 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3099 folio_lock(folio);
3100 if (!folio->mapping) {
3101 folio_unlock(folio);
3102 return 0; /* retry */
3103 }
3104 ret |= VM_FAULT_LOCKED;
3105 } else
3106 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3107 return ret;
3108 }
3109
3110 /*
3111 * Handle dirtying of a page in a shared file mapping on a write fault.
3112 *
3113 * The function expects the page to be locked and unlocks it.
3114 */
3115 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3116 {
3117 struct vm_area_struct *vma = vmf->vma;
3118 struct address_space *mapping;
3119 struct folio *folio = page_folio(vmf->page);
3120 bool dirtied;
3121 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3122
3123 dirtied = folio_mark_dirty(folio);
3124 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3125 /*
3126 * Take a local copy of the address_space - folio.mapping may be zeroed
3127 * by truncate after folio_unlock(). The address_space itself remains
3128 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s
3129 * release semantics to prevent the compiler from undoing this copying.
3130 */
3131 mapping = folio_raw_mapping(folio);
3132 folio_unlock(folio);
3133
3134 if (!page_mkwrite)
3135 file_update_time(vma->vm_file);
3136
3137 /*
3138 * Throttle page dirtying rate down to writeback speed.
3139 *
3140 * mapping may be NULL here because some device drivers do not
3141 * set page.mapping but still dirty their pages.
3142 *
3143 * Drop the mmap_lock before waiting on IO, if we can. The file
3144 * is pinning the mapping, as per above.
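 * If we did drop it, returning VM_FAULT_COMPLETED below tells the caller
 * that the fault is done and that the mmap_lock is no longer held.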
3145 */
3146 if ((dirtied || page_mkwrite) && mapping) {
3147 struct file *fpin;
3148
3149 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3150 balance_dirty_pages_ratelimited(mapping);
3151 if (fpin) {
3152 fput(fpin);
3153 return VM_FAULT_COMPLETED;
3154 }
3155 }
3156
3157 return 0;
3158 }
3159
3160 /*
3161 * Handle write page faults for pages that can be reused in the current vma.
3162 *
3163 * This can happen either due to the mapping carrying the VM_SHARED flag,
3164 * or due to us holding the last remaining reference to the page. In either
3165 * case, all we need to do here is to mark the page as writable and update
3166 * any related book-keeping.
3167 */
3168 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3169 __releases(vmf->ptl)
3170 {
3171 struct vm_area_struct *vma = vmf->vma;
3172 pte_t entry;
3173
3174 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3175
3176 if (folio) {
3177 VM_BUG_ON(folio_test_anon(folio) &&
3178 !PageAnonExclusive(vmf->page));
3179 /*
3180 * Clear the folio's cpupid information as the existing
3181 * information potentially belongs to a now completely
3182 * unrelated process.
3183 */
3184 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3185 }
3186
3187 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3188 entry = pte_mkyoung(vmf->orig_pte);
3189 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3190 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3191 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3192 pte_unmap_unlock(vmf->pte, vmf->ptl);
3193 count_vm_event(PGREUSE);
3194 }
3195
3196 /*
3197 * We could add a bitflag somewhere, but for now, we know that all
3198 * vm_ops that have a ->map_pages have been audited and don't need
3199 * the mmap_lock to be held.
3200 */
3201 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3202 {
3203 struct vm_area_struct *vma = vmf->vma;
3204
3205 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3206 return 0;
3207 vma_end_read(vma);
3208 return VM_FAULT_RETRY;
3209 }
3210
3211 /**
3212 * vmf_anon_prepare - Prepare to handle an anonymous fault.
3213 * @vmf: The vm_fault descriptor passed from the fault handler.
3214 *
3215 * When preparing to insert an anonymous page into a VMA from a
3216 * fault handler, call this function rather than anon_vma_prepare().
3217 * If this vma does not already have an associated anon_vma and we are
3218 * only protected by the per-VMA lock, the caller must retry with the
3219 * mmap_lock held. __anon_vma_prepare() will look at adjacent VMAs to
3220 * determine if this VMA can share its anon_vma, and that's not safe to
3221 * do with only the per-VMA lock held for this VMA.
3222 *
3223 * Return: 0 if fault handling can proceed. Any other value should be
3224 * returned to the caller.
3225 */
3226 vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
3227 {
3228 struct vm_area_struct *vma = vmf->vma;
3229 vm_fault_t ret = 0;
3230
3231 if (likely(vma->anon_vma))
3232 return 0;
3233 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3234 if (!mmap_read_trylock(vma->vm_mm)) {
3235 vma_end_read(vma);
3236 return VM_FAULT_RETRY;
3237 }
3238 }
3239 if (__anon_vma_prepare(vma))
3240 ret = VM_FAULT_OOM;
3241 if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3242 mmap_read_unlock(vma->vm_mm);
3243 return ret;
3244 }
3245
3246 /*
3247 * Handle the case of a page which we actually need to copy to a new page,
3248 * either due to COW or unsharing.
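 * (COW when a process writes to a write-protected page that others may
 * still be using; unsharing when GUP wants to take a R/O pin on a
 * possibly shared anonymous page.)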
3249 * 3250 * Called with mmap_lock locked and the old page referenced, but 3251 * without the ptl held. 3252 * 3253 * High level logic flow: 3254 * 3255 * - Allocate a page, copy the content of the old page to the new one. 3256 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. 3257 * - Take the PTL. If the pte changed, bail out and release the allocated page 3258 * - If the pte is still the way we remember it, update the page table and all 3259 * relevant references. This includes dropping the reference the page-table 3260 * held to the old page, as well as updating the rmap. 3261 * - In any case, unlock the PTL and drop the reference we took to the old page. 3262 */ 3263 static vm_fault_t wp_page_copy(struct vm_fault *vmf) 3264 { 3265 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3266 struct vm_area_struct *vma = vmf->vma; 3267 struct mm_struct *mm = vma->vm_mm; 3268 struct folio *old_folio = NULL; 3269 struct folio *new_folio = NULL; 3270 pte_t entry; 3271 int page_copied = 0; 3272 struct mmu_notifier_range range; 3273 vm_fault_t ret; 3274 bool pfn_is_zero; 3275 3276 delayacct_wpcopy_start(); 3277 3278 if (vmf->page) 3279 old_folio = page_folio(vmf->page); 3280 ret = vmf_anon_prepare(vmf); 3281 if (unlikely(ret)) 3282 goto out; 3283 3284 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); 3285 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); 3286 if (!new_folio) 3287 goto oom; 3288 3289 if (!pfn_is_zero) { 3290 int err; 3291 3292 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); 3293 if (err) { 3294 /* 3295 * COW failed, if the fault was solved by other, 3296 * it's fine. If not, userspace would re-fault on 3297 * the same address and we will handle the fault 3298 * from the second attempt. 3299 * The -EHWPOISON case will not be retried. 3300 */ 3301 folio_put(new_folio); 3302 if (old_folio) 3303 folio_put(old_folio); 3304 3305 delayacct_wpcopy_end(); 3306 return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0; 3307 } 3308 kmsan_copy_page_meta(&new_folio->page, vmf->page); 3309 } 3310 3311 __folio_mark_uptodate(new_folio); 3312 3313 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 3314 vmf->address & PAGE_MASK, 3315 (vmf->address & PAGE_MASK) + PAGE_SIZE); 3316 mmu_notifier_invalidate_range_start(&range); 3317 3318 /* 3319 * Re-check the pte - we dropped the lock 3320 */ 3321 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); 3322 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 3323 if (old_folio) { 3324 if (!folio_test_anon(old_folio)) { 3325 dec_mm_counter(mm, mm_counter_file(old_folio)); 3326 inc_mm_counter(mm, MM_ANONPAGES); 3327 } 3328 } else { 3329 ksm_might_unmap_zero_page(mm, vmf->orig_pte); 3330 inc_mm_counter(mm, MM_ANONPAGES); 3331 } 3332 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3333 entry = mk_pte(&new_folio->page, vma->vm_page_prot); 3334 entry = pte_sw_mkyoung(entry); 3335 if (unlikely(unshare)) { 3336 if (pte_soft_dirty(vmf->orig_pte)) 3337 entry = pte_mksoft_dirty(entry); 3338 if (pte_uffd_wp(vmf->orig_pte)) 3339 entry = pte_mkuffd_wp(entry); 3340 } else { 3341 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3342 } 3343 3344 /* 3345 * Clear the pte entry and flush it first, before updating the 3346 * pte with the new entry, to keep TLBs on different CPUs in 3347 * sync. This code used to set the new PTE then flush TLBs, but 3348 * that left a window where the new PTE could be loaded into 3349 * some TLBs while the old PTE remains in others. 
*/
3351 ptep_clear_flush(vma, vmf->address, vmf->pte);
3352 folio_add_new_anon_rmap(new_folio, vma, vmf->address);
3353 folio_add_lru_vma(new_folio, vma);
3354 BUG_ON(unshare && pte_write(entry));
3355 set_pte_at(mm, vmf->address, vmf->pte, entry);
3356 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3357 if (old_folio) {
3358 /*
3359 * Only after switching the pte to the new page may
3360 * we remove the mapcount here. Otherwise another
3361 * process may come and find the rmap count decremented
3362 * before the pte is switched to the new page, and
3363 * "reuse" the old page writing into it while our pte
3364 * here still points into it and can be read by other
3365 * threads.
3366 *
3367 * The critical issue is to order this
3368 * folio_remove_rmap_pte() with the ptep_clear_flush
3369 * above. Those stores are ordered by (if nothing else,)
3370 * the barrier present in the atomic_add_negative
3371 * in folio_remove_rmap_pte().
3372 *
3373 * Then the TLB flush in ptep_clear_flush ensures that
3374 * no process can access the old page before the
3375 * decremented mapcount is visible. And the old page
3376 * cannot be reused until after the decremented
3377 * mapcount is visible. So transitively, TLBs to the
3378 * old page will be flushed before it can be reused.
3379 */
3380 folio_remove_rmap_pte(old_folio, vmf->page, vma);
3381 }
3382
3383 /* Free the old page.. */
3384 new_folio = old_folio;
3385 page_copied = 1;
3386 pte_unmap_unlock(vmf->pte, vmf->ptl);
3387 } else if (vmf->pte) {
3388 update_mmu_tlb(vma, vmf->address, vmf->pte);
3389 pte_unmap_unlock(vmf->pte, vmf->ptl);
3390 }
3391
3392 mmu_notifier_invalidate_range_end(&range);
3393
3394 if (new_folio)
3395 folio_put(new_folio);
3396 if (old_folio) {
3397 if (page_copied)
3398 free_swap_cache(old_folio);
3399 folio_put(old_folio);
3400 }
3401
3402 delayacct_wpcopy_end();
3403 return 0;
3404 oom:
3405 ret = VM_FAULT_OOM;
3406 out:
3407 if (old_folio)
3408 folio_put(old_folio);
3409
3410 delayacct_wpcopy_end();
3411 return ret;
3412 }
3413
3414 /**
3415 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3416 * writeable once the page is prepared
3417 *
3418 * @vmf: structure describing the fault
3419 * @folio: the folio of vmf->page
3420 *
3421 * This function handles all that is needed to finish a write page fault in a
3422 * shared mapping due to PTE being read-only once the mapped page is prepared.
3423 * It handles locking of PTE and modifying it.
3424 *
3425 * The function expects the page to be locked or other protection against
3426 * concurrent faults / writeback (such as DAX radix tree locks).
3427 *
3428 * Return: %0 on success, %VM_FAULT_NOPAGE when the PTE got changed before
3429 * we acquired the PTE lock.
3430 */
3431 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
3432 {
3433 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3434 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3435 &vmf->ptl);
3436 if (!vmf->pte)
3437 return VM_FAULT_NOPAGE;
3438 /*
3439 * We might have raced with another page fault while we released the
3440 * pte_offset_map_lock.
3441 */ 3442 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { 3443 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 3444 pte_unmap_unlock(vmf->pte, vmf->ptl); 3445 return VM_FAULT_NOPAGE; 3446 } 3447 wp_page_reuse(vmf, folio); 3448 return 0; 3449 } 3450 3451 /* 3452 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 3453 * mapping 3454 */ 3455 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) 3456 { 3457 struct vm_area_struct *vma = vmf->vma; 3458 3459 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { 3460 vm_fault_t ret; 3461 3462 pte_unmap_unlock(vmf->pte, vmf->ptl); 3463 ret = vmf_can_call_fault(vmf); 3464 if (ret) 3465 return ret; 3466 3467 vmf->flags |= FAULT_FLAG_MKWRITE; 3468 ret = vma->vm_ops->pfn_mkwrite(vmf); 3469 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) 3470 return ret; 3471 return finish_mkwrite_fault(vmf, NULL); 3472 } 3473 wp_page_reuse(vmf, NULL); 3474 return 0; 3475 } 3476 3477 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) 3478 __releases(vmf->ptl) 3479 { 3480 struct vm_area_struct *vma = vmf->vma; 3481 vm_fault_t ret = 0; 3482 3483 folio_get(folio); 3484 3485 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 3486 vm_fault_t tmp; 3487 3488 pte_unmap_unlock(vmf->pte, vmf->ptl); 3489 tmp = vmf_can_call_fault(vmf); 3490 if (tmp) { 3491 folio_put(folio); 3492 return tmp; 3493 } 3494 3495 tmp = do_page_mkwrite(vmf, folio); 3496 if (unlikely(!tmp || (tmp & 3497 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3498 folio_put(folio); 3499 return tmp; 3500 } 3501 tmp = finish_mkwrite_fault(vmf, folio); 3502 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 3503 folio_unlock(folio); 3504 folio_put(folio); 3505 return tmp; 3506 } 3507 } else { 3508 wp_page_reuse(vmf, folio); 3509 folio_lock(folio); 3510 } 3511 ret |= fault_dirty_shared_page(vmf); 3512 folio_put(folio); 3513 3514 return ret; 3515 } 3516 3517 static bool wp_can_reuse_anon_folio(struct folio *folio, 3518 struct vm_area_struct *vma) 3519 { 3520 /* 3521 * We could currently only reuse a subpage of a large folio if no 3522 * other subpages of the large folios are still mapped. However, 3523 * let's just consistently not reuse subpages even if we could 3524 * reuse in that scenario, and give back a large folio a bit 3525 * sooner. 3526 */ 3527 if (folio_test_large(folio)) 3528 return false; 3529 3530 /* 3531 * We have to verify under folio lock: these early checks are 3532 * just an optimization to avoid locking the folio and freeing 3533 * the swapcache if there is little hope that we can reuse. 3534 * 3535 * KSM doesn't necessarily raise the folio refcount. 3536 */ 3537 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) 3538 return false; 3539 if (!folio_test_lru(folio)) 3540 /* 3541 * We cannot easily detect+handle references from 3542 * remote LRU caches or references to LRU folios. 3543 */ 3544 lru_add_drain(); 3545 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) 3546 return false; 3547 if (!folio_trylock(folio)) 3548 return false; 3549 if (folio_test_swapcache(folio)) 3550 folio_free_swap(folio); 3551 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { 3552 folio_unlock(folio); 3553 return false; 3554 } 3555 /* 3556 * Ok, we've got the only folio reference from our mapping 3557 * and the folio is locked, it's dark out, and we're wearing 3558 * sunglasses. Hit it. 
3559 */
3560 folio_move_anon_rmap(folio, vma);
3561 folio_unlock(folio);
3562 return true;
3563 }
3564
3565 /*
3566 * This routine handles present pages, when
3567 * * users try to write to a shared page (FAULT_FLAG_WRITE)
3568 * * GUP wants to take a R/O pin on a possibly shared anonymous page
3569 * (FAULT_FLAG_UNSHARE)
3570 *
3571 * It is done by copying the page to a new address and decrementing the
3572 * shared-page counter for the old page.
3573 *
3574 * Note that this routine assumes that the protection checks have been
3575 * done by the caller (the low-level page fault routine in most cases).
3576 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3577 * done any necessary COW.
3578 *
3579 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3580 * though the page will change only once the write actually happens. This
3581 * avoids a few races, and potentially makes it more efficient.
3582 *
3583 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3584 * but allow concurrent faults), with pte both mapped and locked.
3585 * We return with mmap_lock still held, but pte unmapped and unlocked.
3586 */
3587 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3588 __releases(vmf->ptl)
3589 {
3590 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3591 struct vm_area_struct *vma = vmf->vma;
3592 struct folio *folio = NULL;
3593 pte_t pte;
3594
3595 if (likely(!unshare)) {
3596 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3597 if (!userfaultfd_wp_async(vma)) {
3598 pte_unmap_unlock(vmf->pte, vmf->ptl);
3599 return handle_userfault(vmf, VM_UFFD_WP);
3600 }
3601
3602 /*
3603 * Nothing needed (cache flush, TLB invalidations,
3604 * etc.) because we're only removing the uffd-wp bit,
3605 * which is completely invisible to the user.
3606 */
3607 pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
3608
3609 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3610 /*
3611 * Update orig_pte so that the CoW handling that
3612 * follows sees the new value.
3613 */
3614 vmf->orig_pte = pte;
3615 }
3616
3617 /*
3618 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3619 * is flushed in this case before copying.
3620 */
3621 if (unlikely(userfaultfd_wp(vmf->vma) &&
3622 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3623 flush_tlb_page(vmf->vma, vmf->address);
3624 }
3625
3626 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3627
3628 if (vmf->page)
3629 folio = page_folio(vmf->page);
3630
3631 /*
3632 * Shared mapping: we are guaranteed to have VM_WRITE and
3633 * FAULT_FLAG_WRITE set at this point.
3634 */
3635 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3636 /*
3637 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3638 * VM_PFNMAP VMA.
3639 *
3640 * We should not cow pages in a shared writeable mapping.
3641 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3642 */
3643 if (!vmf->page)
3644 return wp_pfn_shared(vmf);
3645 return wp_page_shared(vmf, folio);
3646 }
3647
3648 /*
3649 * Private mapping: create an exclusive anonymous page copy if reuse
3650 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
3651 *
3652 * If we encounter a page that is marked exclusive, we must reuse
3653 * the page without further checks.
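 * Exclusivity is what PageAnonExclusive() tracks; the check below reuses
 * such pages directly and otherwise falls back to
 * wp_can_reuse_anon_folio().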
3654 */ 3655 if (folio && folio_test_anon(folio) && 3656 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { 3657 if (!PageAnonExclusive(vmf->page)) 3658 SetPageAnonExclusive(vmf->page); 3659 if (unlikely(unshare)) { 3660 pte_unmap_unlock(vmf->pte, vmf->ptl); 3661 return 0; 3662 } 3663 wp_page_reuse(vmf, folio); 3664 return 0; 3665 } 3666 /* 3667 * Ok, we need to copy. Oh, well.. 3668 */ 3669 if (folio) 3670 folio_get(folio); 3671 3672 pte_unmap_unlock(vmf->pte, vmf->ptl); 3673 #ifdef CONFIG_KSM 3674 if (folio && folio_test_ksm(folio)) 3675 count_vm_event(COW_KSM); 3676 #endif 3677 return wp_page_copy(vmf); 3678 } 3679 3680 static void unmap_mapping_range_vma(struct vm_area_struct *vma, 3681 unsigned long start_addr, unsigned long end_addr, 3682 struct zap_details *details) 3683 { 3684 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); 3685 } 3686 3687 static inline void unmap_mapping_range_tree(struct rb_root_cached *root, 3688 pgoff_t first_index, 3689 pgoff_t last_index, 3690 struct zap_details *details) 3691 { 3692 struct vm_area_struct *vma; 3693 pgoff_t vba, vea, zba, zea; 3694 3695 vma_interval_tree_foreach(vma, root, first_index, last_index) { 3696 vba = vma->vm_pgoff; 3697 vea = vba + vma_pages(vma) - 1; 3698 zba = max(first_index, vba); 3699 zea = min(last_index, vea); 3700 3701 unmap_mapping_range_vma(vma, 3702 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 3703 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 3704 details); 3705 } 3706 } 3707 3708 /** 3709 * unmap_mapping_folio() - Unmap single folio from processes. 3710 * @folio: The locked folio to be unmapped. 3711 * 3712 * Unmap this folio from any userspace process which still has it mmaped. 3713 * Typically, for efficiency, the range of nearby pages has already been 3714 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once 3715 * truncation or invalidation holds the lock on a folio, it may find that 3716 * the page has been remapped again: and then uses unmap_mapping_folio() 3717 * to unmap it finally. 3718 */ 3719 void unmap_mapping_folio(struct folio *folio) 3720 { 3721 struct address_space *mapping = folio->mapping; 3722 struct zap_details details = { }; 3723 pgoff_t first_index; 3724 pgoff_t last_index; 3725 3726 VM_BUG_ON(!folio_test_locked(folio)); 3727 3728 first_index = folio->index; 3729 last_index = folio_next_index(folio) - 1; 3730 3731 details.even_cows = false; 3732 details.single_folio = folio; 3733 details.zap_flags = ZAP_FLAG_DROP_MARKER; 3734 3735 i_mmap_lock_read(mapping); 3736 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3737 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3738 last_index, &details); 3739 i_mmap_unlock_read(mapping); 3740 } 3741 3742 /** 3743 * unmap_mapping_pages() - Unmap pages from processes. 3744 * @mapping: The address space containing pages to be unmapped. 3745 * @start: Index of first page to be unmapped. 3746 * @nr: Number of pages to be unmapped. 0 to unmap to end of file. 3747 * @even_cows: Whether to unmap even private COWed pages. 3748 * 3749 * Unmap the pages in this address space from any userspace process which 3750 * has them mmaped. Generally, you want to remove COWed pages as well when 3751 * a file is being truncated, but not when invalidating pages from the page 3752 * cache. 
3753 */ 3754 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, 3755 pgoff_t nr, bool even_cows) 3756 { 3757 struct zap_details details = { }; 3758 pgoff_t first_index = start; 3759 pgoff_t last_index = start + nr - 1; 3760 3761 details.even_cows = even_cows; 3762 if (last_index < first_index) 3763 last_index = ULONG_MAX; 3764 3765 i_mmap_lock_read(mapping); 3766 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3767 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3768 last_index, &details); 3769 i_mmap_unlock_read(mapping); 3770 } 3771 EXPORT_SYMBOL_GPL(unmap_mapping_pages); 3772 3773 /** 3774 * unmap_mapping_range - unmap the portion of all mmaps in the specified 3775 * address_space corresponding to the specified byte range in the underlying 3776 * file. 3777 * 3778 * @mapping: the address space containing mmaps to be unmapped. 3779 * @holebegin: byte in first page to unmap, relative to the start of 3780 * the underlying file. This will be rounded down to a PAGE_SIZE 3781 * boundary. Note that this is different from truncate_pagecache(), which 3782 * must keep the partial page. In contrast, we must get rid of 3783 * partial pages. 3784 * @holelen: size of prospective hole in bytes. This will be rounded 3785 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 3786 * end of the file. 3787 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 3788 * but 0 when invalidating pagecache, don't throw away private data. 3789 */ 3790 void unmap_mapping_range(struct address_space *mapping, 3791 loff_t const holebegin, loff_t const holelen, int even_cows) 3792 { 3793 pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT; 3794 pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT; 3795 3796 /* Check for overflow. */ 3797 if (sizeof(holelen) > sizeof(hlen)) { 3798 long long holeend = 3799 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 3800 if (holeend & ~(long long)ULONG_MAX) 3801 hlen = ULONG_MAX - hba + 1; 3802 } 3803 3804 unmap_mapping_pages(mapping, hba, hlen, even_cows); 3805 } 3806 EXPORT_SYMBOL(unmap_mapping_range); 3807 3808 /* 3809 * Restore a potential device exclusive pte to a working pte entry 3810 */ 3811 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) 3812 { 3813 struct folio *folio = page_folio(vmf->page); 3814 struct vm_area_struct *vma = vmf->vma; 3815 struct mmu_notifier_range range; 3816 vm_fault_t ret; 3817 3818 /* 3819 * We need a reference to lock the folio because we don't hold 3820 * the PTL so a racing thread can remove the device-exclusive 3821 * entry and unmap it. If the folio is free the entry must 3822 * have been removed already. If it happens to have already 3823 * been re-allocated after being freed all we do is lock and 3824 * unlock it. 
3825 */ 3826 if (!folio_try_get(folio)) 3827 return 0; 3828 3829 ret = folio_lock_or_retry(folio, vmf); 3830 if (ret) { 3831 folio_put(folio); 3832 return ret; 3833 } 3834 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, 3835 vma->vm_mm, vmf->address & PAGE_MASK, 3836 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); 3837 mmu_notifier_invalidate_range_start(&range); 3838 3839 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 3840 &vmf->ptl); 3841 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 3842 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); 3843 3844 if (vmf->pte) 3845 pte_unmap_unlock(vmf->pte, vmf->ptl); 3846 folio_unlock(folio); 3847 folio_put(folio); 3848 3849 mmu_notifier_invalidate_range_end(&range); 3850 return 0; 3851 } 3852 3853 static inline bool should_try_to_free_swap(struct folio *folio, 3854 struct vm_area_struct *vma, 3855 unsigned int fault_flags) 3856 { 3857 if (!folio_test_swapcache(folio)) 3858 return false; 3859 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || 3860 folio_test_mlocked(folio)) 3861 return true; 3862 /* 3863 * If we want to map a page that's in the swapcache writable, we 3864 * have to detect via the refcount if we're really the exclusive 3865 * user. Try freeing the swapcache to get rid of the swapcache 3866 * reference only in case it's likely that we'll be the exclusive user. 3867 */ 3868 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && 3869 folio_ref_count(folio) == 2; 3870 } 3871 3872 static vm_fault_t pte_marker_clear(struct vm_fault *vmf) 3873 { 3874 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 3875 vmf->address, &vmf->ptl); 3876 if (!vmf->pte) 3877 return 0; 3878 /* 3879 * Be careful so that we will only recover a special uffd-wp pte into a 3880 * none pte. Otherwise it means the pte could have changed, so retry. 3881 * 3882 * This should also cover the case where e.g. the pte changed 3883 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED. 3884 * So is_pte_marker() check is not enough to safely drop the pte. 3885 */ 3886 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) 3887 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); 3888 pte_unmap_unlock(vmf->pte, vmf->ptl); 3889 return 0; 3890 } 3891 3892 static vm_fault_t do_pte_missing(struct vm_fault *vmf) 3893 { 3894 if (vma_is_anonymous(vmf->vma)) 3895 return do_anonymous_page(vmf); 3896 else 3897 return do_fault(vmf); 3898 } 3899 3900 /* 3901 * This is actually a page-missing access, but with uffd-wp special pte 3902 * installed. It means this pte was wr-protected before being unmapped. 3903 */ 3904 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) 3905 { 3906 /* 3907 * Just in case there're leftover special ptes even after the region 3908 * got unregistered - we can simply clear them. 3909 */ 3910 if (unlikely(!userfaultfd_wp(vmf->vma))) 3911 return pte_marker_clear(vmf); 3912 3913 return do_pte_missing(vmf); 3914 } 3915 3916 static vm_fault_t handle_pte_marker(struct vm_fault *vmf) 3917 { 3918 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); 3919 unsigned long marker = pte_marker_get(entry); 3920 3921 /* 3922 * PTE markers should never be empty. If anything weird happened, 3923 * the best thing to do is to kill the process along with its mm.
3924 */ 3925 if (WARN_ON_ONCE(!marker)) 3926 return VM_FAULT_SIGBUS; 3927 3928 /* Higher priority than uffd-wp when data corrupted */ 3929 if (marker & PTE_MARKER_POISONED) 3930 return VM_FAULT_HWPOISON; 3931 3932 if (pte_marker_entry_uffd_wp(entry)) 3933 return pte_marker_handle_uffd_wp(vmf); 3934 3935 /* This is an unknown pte marker */ 3936 return VM_FAULT_SIGBUS; 3937 } 3938 3939 /* 3940 * We enter with non-exclusive mmap_lock (to exclude vma changes, 3941 * but allow concurrent faults), and pte mapped but not yet locked. 3942 * We return with pte unmapped and unlocked. 3943 * 3944 * We return with the mmap_lock locked or unlocked in the same cases 3945 * as does filemap_fault(). 3946 */ 3947 vm_fault_t do_swap_page(struct vm_fault *vmf) 3948 { 3949 struct vm_area_struct *vma = vmf->vma; 3950 struct folio *swapcache, *folio = NULL; 3951 struct page *page; 3952 struct swap_info_struct *si = NULL; 3953 rmap_t rmap_flags = RMAP_NONE; 3954 bool need_clear_cache = false; 3955 bool exclusive = false; 3956 swp_entry_t entry; 3957 pte_t pte; 3958 vm_fault_t ret = 0; 3959 void *shadow = NULL; 3960 3961 if (!pte_unmap_same(vmf)) 3962 goto out; 3963 3964 entry = pte_to_swp_entry(vmf->orig_pte); 3965 if (unlikely(non_swap_entry(entry))) { 3966 if (is_migration_entry(entry)) { 3967 migration_entry_wait(vma->vm_mm, vmf->pmd, 3968 vmf->address); 3969 } else if (is_device_exclusive_entry(entry)) { 3970 vmf->page = pfn_swap_entry_to_page(entry); 3971 ret = remove_device_exclusive_entry(vmf); 3972 } else if (is_device_private_entry(entry)) { 3973 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { 3974 /* 3975 * migrate_to_ram is not yet ready to operate 3976 * under VMA lock. 3977 */ 3978 vma_end_read(vma); 3979 ret = VM_FAULT_RETRY; 3980 goto out; 3981 } 3982 3983 vmf->page = pfn_swap_entry_to_page(entry); 3984 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 3985 vmf->address, &vmf->ptl); 3986 if (unlikely(!vmf->pte || 3987 !pte_same(ptep_get(vmf->pte), 3988 vmf->orig_pte))) 3989 goto unlock; 3990 3991 /* 3992 * Get a page reference while we know the page can't be 3993 * freed. 3994 */ 3995 get_page(vmf->page); 3996 pte_unmap_unlock(vmf->pte, vmf->ptl); 3997 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); 3998 put_page(vmf->page); 3999 } else if (is_hwpoison_entry(entry)) { 4000 ret = VM_FAULT_HWPOISON; 4001 } else if (is_pte_marker_entry(entry)) { 4002 ret = handle_pte_marker(vmf); 4003 } else { 4004 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); 4005 ret = VM_FAULT_SIGBUS; 4006 } 4007 goto out; 4008 } 4009 4010 /* Prevent swapoff from happening to us. */ 4011 si = get_swap_device(entry); 4012 if (unlikely(!si)) 4013 goto out; 4014 4015 folio = swap_cache_get_folio(entry, vma, vmf->address); 4016 if (folio) 4017 page = folio_file_page(folio, swp_offset(entry)); 4018 swapcache = folio; 4019 4020 if (!folio) { 4021 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && 4022 __swap_count(entry) == 1) { 4023 /* 4024 * Prevent parallel swapin from proceeding with 4025 * the cache flag. Otherwise, another thread may 4026 * finish swapin first, free the entry, and swapout 4027 * reusing the same entry. It's undetectable as 4028 * pte_same() returns true due to entry reuse. 
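*
* A hypothetical interleaving of the race being closed here (the
* exact ordering is illustrative only):
*
*   this CPU                       other CPU
*   reads pte, sees swap entry E
*                                  faults, swaps E in, frees E
*                                  E is recycled for a swapout of
*                                  unrelated data
*   pte_same() still matches E
*   would map the stale page
*
* With the cache flag held via swapcache_prepare(), the loser of the
* race backs off below and retries the fault instead.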
4029 */ 4030 if (swapcache_prepare(entry)) { 4031 /* Relax a bit to prevent rapid repeated page faults */ 4032 schedule_timeout_uninterruptible(1); 4033 goto out; 4034 } 4035 need_clear_cache = true; 4036 4037 /* skip swapcache */ 4038 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, 4039 vma, vmf->address, false); 4040 page = &folio->page; 4041 if (folio) { 4042 __folio_set_locked(folio); 4043 __folio_set_swapbacked(folio); 4044 4045 if (mem_cgroup_swapin_charge_folio(folio, 4046 vma->vm_mm, GFP_KERNEL, 4047 entry)) { 4048 ret = VM_FAULT_OOM; 4049 goto out_page; 4050 } 4051 mem_cgroup_swapin_uncharge_swap(entry); 4052 4053 shadow = get_shadow_from_swap_cache(entry); 4054 if (shadow) 4055 workingset_refault(folio, shadow); 4056 4057 folio_add_lru(folio); 4058 4059 /* To provide entry to swap_read_folio() */ 4060 folio->swap = entry; 4061 swap_read_folio(folio, true, NULL); 4062 folio->private = NULL; 4063 } 4064 } else { 4065 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, 4066 vmf); 4067 if (page) 4068 folio = page_folio(page); 4069 swapcache = folio; 4070 } 4071 4072 if (!folio) { 4073 /* 4074 * Back out if somebody else faulted in this pte 4075 * while we released the pte lock. 4076 */ 4077 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4078 vmf->address, &vmf->ptl); 4079 if (likely(vmf->pte && 4080 pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 4081 ret = VM_FAULT_OOM; 4082 goto unlock; 4083 } 4084 4085 /* Had to read the page from swap area: Major fault */ 4086 ret = VM_FAULT_MAJOR; 4087 count_vm_event(PGMAJFAULT); 4088 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 4089 } else if (PageHWPoison(page)) { 4090 /* 4091 * hwpoisoned dirty swapcache pages are kept for killing 4092 * owner processes (which may be unknown at hwpoison time) 4093 */ 4094 ret = VM_FAULT_HWPOISON; 4095 goto out_release; 4096 } 4097 4098 ret |= folio_lock_or_retry(folio, vmf); 4099 if (ret & VM_FAULT_RETRY) 4100 goto out_release; 4101 4102 if (swapcache) { 4103 /* 4104 * Make sure folio_free_swap() or swapoff did not release the 4105 * swapcache from under us. The page pin, and pte_same test 4106 * below, are not enough to exclude that. Even if it is still 4107 * swapcache, we need to check that the page's swap has not 4108 * changed. 4109 */ 4110 if (unlikely(!folio_test_swapcache(folio) || 4111 page_swap_entry(page).val != entry.val)) 4112 goto out_page; 4113 4114 /* 4115 * KSM sometimes has to copy on read faults, for example, if 4116 * page->index of !PageKSM() pages would be nonlinear inside the 4117 * anon VMA -- PageKSM() is lost on actual swapout. 4118 */ 4119 folio = ksm_might_need_to_copy(folio, vma, vmf->address); 4120 if (unlikely(!folio)) { 4121 ret = VM_FAULT_OOM; 4122 folio = swapcache; 4123 goto out_page; 4124 } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { 4125 ret = VM_FAULT_HWPOISON; 4126 folio = swapcache; 4127 goto out_page; 4128 } 4129 if (folio != swapcache) 4130 page = folio_page(folio, 0); 4131 4132 /* 4133 * If we want to map a page that's in the swapcache writable, we 4134 * have to detect via the refcount if we're really the exclusive 4135 * owner. Try removing the extra reference from the local LRU 4136 * caches if required. 4137 */ 4138 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && 4139 !folio_test_ksm(folio) && !folio_test_lru(folio)) 4140 lru_add_drain(); 4141 } 4142 4143 folio_throttle_swaprate(folio, GFP_KERNEL); 4144 4145 /* 4146 * Back out if somebody else already faulted in this pte. 
4147 */ 4148 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4149 &vmf->ptl); 4150 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 4151 goto out_nomap; 4152 4153 if (unlikely(!folio_test_uptodate(folio))) { 4154 ret = VM_FAULT_SIGBUS; 4155 goto out_nomap; 4156 } 4157 4158 /* 4159 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte 4160 * must never point at an anonymous page in the swapcache that is 4161 * PG_anon_exclusive. Sanity check that this holds and especially, that 4162 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity 4163 * check after taking the PT lock and making sure that nobody 4164 * concurrently faulted in this page and set PG_anon_exclusive. 4165 */ 4166 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio)); 4167 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page)); 4168 4169 /* 4170 * Check under PT lock (to protect against concurrent fork() sharing 4171 * the swap entry concurrently) for certainly exclusive pages. 4172 */ 4173 if (!folio_test_ksm(folio)) { 4174 exclusive = pte_swp_exclusive(vmf->orig_pte); 4175 if (folio != swapcache) { 4176 /* 4177 * We have a fresh page that is not exposed to the 4178 * swapcache -> certainly exclusive. 4179 */ 4180 exclusive = true; 4181 } else if (exclusive && folio_test_writeback(folio) && 4182 data_race(si->flags & SWP_STABLE_WRITES)) { 4183 /* 4184 * This is tricky: not all swap backends support 4185 * concurrent page modifications while under writeback. 4186 * 4187 * So if we stumble over such a page in the swapcache 4188 * we must not set the page exclusive, otherwise we can 4189 * map it writable without further checks and modify it 4190 * while still under writeback. 4191 * 4192 * For these problematic swap backends, simply drop the 4193 * exclusive marker: this is perfectly fine as we start 4194 * writeback only if we fully unmapped the page and 4195 * there are no unexpected references on the page after 4196 * unmapping succeeded. After fully unmapped, no 4197 * further GUP references (FOLL_GET and FOLL_PIN) can 4198 * appear, so dropping the exclusive marker and mapping 4199 * it only R/O is fine. 4200 */ 4201 exclusive = false; 4202 } 4203 } 4204 4205 /* 4206 * Some architectures may have to restore extra metadata to the page 4207 * when reading from swap. This metadata may be indexed by swap entry 4208 * so this must be called before swap_free(). 4209 */ 4210 arch_swap_restore(folio_swap(entry, folio), folio); 4211 4212 /* 4213 * Remove the swap entry and conditionally try to free up the swapcache. 4214 * We're already holding a reference on the page but haven't mapped it 4215 * yet. 4216 */ 4217 swap_free(entry); 4218 if (should_try_to_free_swap(folio, vma, vmf->flags)) 4219 folio_free_swap(folio); 4220 4221 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); 4222 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); 4223 pte = mk_pte(page, vma->vm_page_prot); 4224 4225 /* 4226 * Same logic as in do_wp_page(); however, optimize for pages that are 4227 * certainly not shared either because we just allocated them without 4228 * exposing them to the swapcache or because the swap entry indicates 4229 * exclusivity. 
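*
* When such a page is mapped writable right away, FAULT_FLAG_WRITE is
* also cleared below, so the later do_wp_page() round trip is skipped
* entirely for this fault.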
4230 */ 4231 if (!folio_test_ksm(folio) && 4232 (exclusive || folio_ref_count(folio) == 1)) { 4233 if (vmf->flags & FAULT_FLAG_WRITE) { 4234 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 4235 vmf->flags &= ~FAULT_FLAG_WRITE; 4236 } 4237 rmap_flags |= RMAP_EXCLUSIVE; 4238 } 4239 flush_icache_page(vma, page); 4240 if (pte_swp_soft_dirty(vmf->orig_pte)) 4241 pte = pte_mksoft_dirty(pte); 4242 if (pte_swp_uffd_wp(vmf->orig_pte)) 4243 pte = pte_mkuffd_wp(pte); 4244 vmf->orig_pte = pte; 4245 4246 /* ksm created a completely new copy */ 4247 if (unlikely(folio != swapcache && swapcache)) { 4248 folio_add_new_anon_rmap(folio, vma, vmf->address); 4249 folio_add_lru_vma(folio, vma); 4250 } else { 4251 folio_add_anon_rmap_pte(folio, page, vma, vmf->address, 4252 rmap_flags); 4253 } 4254 4255 VM_BUG_ON(!folio_test_anon(folio) || 4256 (pte_write(pte) && !PageAnonExclusive(page))); 4257 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 4258 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); 4259 4260 folio_unlock(folio); 4261 if (folio != swapcache && swapcache) { 4262 /* 4263 * Hold the lock to avoid the swap entry to be reused 4264 * until we take the PT lock for the pte_same() check 4265 * (to avoid false positives from pte_same). For 4266 * further safety release the lock after the swap_free 4267 * so that the swap count won't change under a 4268 * parallel locked swapcache. 4269 */ 4270 folio_unlock(swapcache); 4271 folio_put(swapcache); 4272 } 4273 4274 if (vmf->flags & FAULT_FLAG_WRITE) { 4275 ret |= do_wp_page(vmf); 4276 if (ret & VM_FAULT_ERROR) 4277 ret &= VM_FAULT_ERROR; 4278 goto out; 4279 } 4280 4281 /* No need to invalidate - it was non-present before */ 4282 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 4283 unlock: 4284 if (vmf->pte) 4285 pte_unmap_unlock(vmf->pte, vmf->ptl); 4286 out: 4287 /* Clear the swap cache pin for direct swapin after PTL unlock */ 4288 if (need_clear_cache) 4289 swapcache_clear(si, entry); 4290 if (si) 4291 put_swap_device(si); 4292 return ret; 4293 out_nomap: 4294 if (vmf->pte) 4295 pte_unmap_unlock(vmf->pte, vmf->ptl); 4296 out_page: 4297 folio_unlock(folio); 4298 out_release: 4299 folio_put(folio); 4300 if (folio != swapcache && swapcache) { 4301 folio_unlock(swapcache); 4302 folio_put(swapcache); 4303 } 4304 if (need_clear_cache) 4305 swapcache_clear(si, entry); 4306 if (si) 4307 put_swap_device(si); 4308 return ret; 4309 } 4310 4311 static bool pte_range_none(pte_t *pte, int nr_pages) 4312 { 4313 int i; 4314 4315 for (i = 0; i < nr_pages; i++) { 4316 if (!pte_none(ptep_get_lockless(pte + i))) 4317 return false; 4318 } 4319 4320 return true; 4321 } 4322 4323 static struct folio *alloc_anon_folio(struct vm_fault *vmf) 4324 { 4325 struct vm_area_struct *vma = vmf->vma; 4326 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4327 unsigned long orders; 4328 struct folio *folio; 4329 unsigned long addr; 4330 pte_t *pte; 4331 gfp_t gfp; 4332 int order; 4333 4334 /* 4335 * If uffd is active for the vma we need per-page fault fidelity to 4336 * maintain the uffd semantics. 4337 */ 4338 if (unlikely(userfaultfd_armed(vma))) 4339 goto fallback; 4340 4341 /* 4342 * Get a list of all the (large) orders below PMD_ORDER that are enabled 4343 * for this vma. Then filter out the orders that can't be allocated over 4344 * the faulting address and still be fully contained in the vma. 
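*
* A worked example (illustrative, assuming 4K base pages and
* PMD_ORDER == 9): BIT(PMD_ORDER) - 1 == 0x1ff covers orders 0-8,
* i.e. folios from 4K up to 1M. If only order 4 (64K) is enabled via
* sysfs, and a suitably aligned 64K range around the fault fits the
* VMA, orders ends up as BIT(4).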
4345 */ 4346 orders = thp_vma_allowable_orders(vma, vma->vm_flags, 4347 TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1); 4348 orders = thp_vma_suitable_orders(vma, vmf->address, orders); 4349 4350 if (!orders) 4351 goto fallback; 4352 4353 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); 4354 if (!pte) 4355 return ERR_PTR(-EAGAIN); 4356 4357 /* 4358 * Find the highest order where the aligned range is completely 4359 * pte_none(). Note that all remaining orders will be completely 4360 * pte_none(). 4361 */ 4362 order = highest_order(orders); 4363 while (orders) { 4364 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 4365 if (pte_range_none(pte + pte_index(addr), 1 << order)) 4366 break; 4367 order = next_order(&orders, order); 4368 } 4369 4370 pte_unmap(pte); 4371 4372 if (!orders) 4373 goto fallback; 4374 4375 /* Try allocating the highest of the remaining orders. */ 4376 gfp = vma_thp_gfp_mask(vma); 4377 while (orders) { 4378 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 4379 folio = vma_alloc_folio(gfp, order, vma, addr, true); 4380 if (folio) { 4381 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { 4382 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); 4383 folio_put(folio); 4384 goto next; 4385 } 4386 folio_throttle_swaprate(folio, gfp); 4387 clear_huge_page(&folio->page, vmf->address, 1 << order); 4388 return folio; 4389 } 4390 next: 4391 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK); 4392 order = next_order(&orders, order); 4393 } 4394 4395 fallback: 4396 #endif 4397 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); 4398 } 4399 4400 /* 4401 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4402 * but allow concurrent faults), and pte mapped but not yet locked. 4403 * We return with mmap_lock still held, but pte unmapped and unlocked. 4404 */ 4405 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) 4406 { 4407 struct vm_area_struct *vma = vmf->vma; 4408 unsigned long addr = vmf->address; 4409 struct folio *folio; 4410 vm_fault_t ret = 0; 4411 int nr_pages = 1; 4412 pte_t entry; 4413 int i; 4414 4415 /* File mapping without ->vm_ops ? */ 4416 if (vma->vm_flags & VM_SHARED) 4417 return VM_FAULT_SIGBUS; 4418 4419 /* 4420 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can 4421 * be distinguished from a transient failure of pte_offset_map(). 4422 */ 4423 if (pte_alloc(vma->vm_mm, vmf->pmd)) 4424 return VM_FAULT_OOM; 4425 4426 /* Use the zero-page for reads */ 4427 if (!(vmf->flags & FAULT_FLAG_WRITE) && 4428 !mm_forbids_zeropage(vma->vm_mm)) { 4429 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), 4430 vma->vm_page_prot)); 4431 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4432 vmf->address, &vmf->ptl); 4433 if (!vmf->pte) 4434 goto unlock; 4435 if (vmf_pte_changed(vmf)) { 4436 update_mmu_tlb(vma, vmf->address, vmf->pte); 4437 goto unlock; 4438 } 4439 ret = check_stable_address_space(vma->vm_mm); 4440 if (ret) 4441 goto unlock; 4442 /* Deliver the page fault to userland, check inside PT lock */ 4443 if (userfaultfd_missing(vma)) { 4444 pte_unmap_unlock(vmf->pte, vmf->ptl); 4445 return handle_userfault(vmf, VM_UFFD_MISSING); 4446 } 4447 goto setpte; 4448 } 4449 4450 /* Allocate our own private page. 
*/ 4451 ret = vmf_anon_prepare(vmf); 4452 if (ret) 4453 return ret; 4454 /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */ 4455 folio = alloc_anon_folio(vmf); 4456 if (IS_ERR(folio)) 4457 return 0; 4458 if (!folio) 4459 goto oom; 4460 4461 nr_pages = folio_nr_pages(folio); 4462 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); 4463 4464 /* 4465 * The memory barrier inside __folio_mark_uptodate makes sure that 4466 * preceding stores to the page contents become visible before 4467 * the set_pte_at() write. 4468 */ 4469 __folio_mark_uptodate(folio); 4470 4471 entry = mk_pte(&folio->page, vma->vm_page_prot); 4472 entry = pte_sw_mkyoung(entry); 4473 if (vma->vm_flags & VM_WRITE) 4474 entry = pte_mkwrite(pte_mkdirty(entry), vma); 4475 4476 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); 4477 if (!vmf->pte) 4478 goto release; 4479 if (nr_pages == 1 && vmf_pte_changed(vmf)) { 4480 update_mmu_tlb(vma, addr, vmf->pte); 4481 goto release; 4482 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { 4483 for (i = 0; i < nr_pages; i++) 4484 update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i); 4485 goto release; 4486 } 4487 4488 ret = check_stable_address_space(vma->vm_mm); 4489 if (ret) 4490 goto release; 4491 4492 /* Deliver the page fault to userland, check inside PT lock */ 4493 if (userfaultfd_missing(vma)) { 4494 pte_unmap_unlock(vmf->pte, vmf->ptl); 4495 folio_put(folio); 4496 return handle_userfault(vmf, VM_UFFD_MISSING); 4497 } 4498 4499 folio_ref_add(folio, nr_pages - 1); 4500 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); 4501 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4502 count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); 4503 #endif 4504 folio_add_new_anon_rmap(folio, vma, addr); 4505 folio_add_lru_vma(folio, vma); 4506 setpte: 4507 if (vmf_orig_pte_uffd_wp(vmf)) 4508 entry = pte_mkuffd_wp(entry); 4509 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); 4510 4511 /* No need to invalidate - it was non-present before */ 4512 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); 4513 unlock: 4514 if (vmf->pte) 4515 pte_unmap_unlock(vmf->pte, vmf->ptl); 4516 return ret; 4517 release: 4518 folio_put(folio); 4519 goto unlock; 4520 oom: 4521 return VM_FAULT_OOM; 4522 } 4523 4524 /* 4525 * The mmap_lock must have been held on entry, and may have been 4526 * released depending on flags and vma->vm_ops->fault() return value. 4527 * See filemap_fault() and __folio_lock_or_retry().
4528 */ 4529 static vm_fault_t __do_fault(struct vm_fault *vmf) 4530 { 4531 struct vm_area_struct *vma = vmf->vma; 4532 struct folio *folio; 4533 vm_fault_t ret; 4534 4535 /* 4536 * Preallocate pte before we take page_lock because this might lead to 4537 * deadlocks for memcg reclaim which waits for pages under writeback: 4538 * lock_page(A) 4539 * SetPageWriteback(A) 4540 * unlock_page(A) 4541 * lock_page(B) 4542 * lock_page(B) 4543 * pte_alloc_one 4544 * shrink_page_list 4545 * wait_on_page_writeback(A) 4546 * SetPageWriteback(B) 4547 * unlock_page(B) 4548 * # flush A, B to clear the writeback 4549 */ 4550 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { 4551 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 4552 if (!vmf->prealloc_pte) 4553 return VM_FAULT_OOM; 4554 } 4555 4556 ret = vma->vm_ops->fault(vmf); 4557 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 4558 VM_FAULT_DONE_COW))) 4559 return ret; 4560 4561 folio = page_folio(vmf->page); 4562 if (unlikely(PageHWPoison(vmf->page))) { 4563 vm_fault_t poisonret = VM_FAULT_HWPOISON; 4564 if (ret & VM_FAULT_LOCKED) { 4565 if (page_mapped(vmf->page)) 4566 unmap_mapping_folio(folio); 4567 /* Retry if a clean folio was removed from the cache. */ 4568 if (mapping_evict_folio(folio->mapping, folio)) 4569 poisonret = VM_FAULT_NOPAGE; 4570 folio_unlock(folio); 4571 } 4572 folio_put(folio); 4573 vmf->page = NULL; 4574 return poisonret; 4575 } 4576 4577 if (unlikely(!(ret & VM_FAULT_LOCKED))) 4578 folio_lock(folio); 4579 else 4580 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page); 4581 4582 return ret; 4583 } 4584 4585 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4586 static void deposit_prealloc_pte(struct vm_fault *vmf) 4587 { 4588 struct vm_area_struct *vma = vmf->vma; 4589 4590 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 4591 /* 4592 * We are going to consume the prealloc table, 4593 * count that as nr_ptes. 4594 */ 4595 mm_inc_nr_ptes(vma->vm_mm); 4596 vmf->prealloc_pte = NULL; 4597 } 4598 4599 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) 4600 { 4601 struct folio *folio = page_folio(page); 4602 struct vm_area_struct *vma = vmf->vma; 4603 bool write = vmf->flags & FAULT_FLAG_WRITE; 4604 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 4605 pmd_t entry; 4606 vm_fault_t ret = VM_FAULT_FALLBACK; 4607 4608 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) 4609 return ret; 4610 4611 if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER) 4612 return ret; 4613 4614 /* 4615 * Just back off if any subpage of a THP is corrupted; otherwise 4616 * the corrupted page may be mapped by a PMD silently and escape the 4617 * check. This kind of THP can only be PTE mapped. Access to 4618 * the corrupted subpage should trigger SIGBUS as expected. 4619 */ 4620 if (unlikely(folio_test_has_hwpoisoned(folio))) 4621 return ret; 4622 4623 /* 4624 * Archs like ppc64 need additional space to store information 4625 * related to pte entry. Use the preallocated table for that.
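* (The table is deposited under the pmd lock below and handed back
* via pgtable_trans_huge_withdraw() if the huge pmd is later zapped
* or split.)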
4626 */ 4627 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { 4628 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 4629 if (!vmf->prealloc_pte) 4630 return VM_FAULT_OOM; 4631 } 4632 4633 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 4634 if (unlikely(!pmd_none(*vmf->pmd))) 4635 goto out; 4636 4637 flush_icache_pages(vma, page, HPAGE_PMD_NR); 4638 4639 entry = mk_huge_pmd(page, vma->vm_page_prot); 4640 if (write) 4641 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 4642 4643 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); 4644 folio_add_file_rmap_pmd(folio, page, vma); 4645 4646 /* 4647 * deposit and withdraw with pmd lock held 4648 */ 4649 if (arch_needs_pgtable_deposit()) 4650 deposit_prealloc_pte(vmf); 4651 4652 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 4653 4654 update_mmu_cache_pmd(vma, haddr, vmf->pmd); 4655 4656 /* fault is handled */ 4657 ret = 0; 4658 count_vm_event(THP_FILE_MAPPED); 4659 out: 4660 spin_unlock(vmf->ptl); 4661 return ret; 4662 } 4663 #else 4664 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) 4665 { 4666 return VM_FAULT_FALLBACK; 4667 } 4668 #endif 4669 4670 /** 4671 * set_pte_range - Set a range of PTEs to point to pages in a folio. 4672 * @vmf: Fault description. 4673 * @folio: The folio that contains @page. 4674 * @page: The first page to create a PTE for. 4675 * @nr: The number of PTEs to create. 4676 * @addr: The first address to create a PTE for. 4677 */ 4678 void set_pte_range(struct vm_fault *vmf, struct folio *folio, 4679 struct page *page, unsigned int nr, unsigned long addr) 4680 { 4681 struct vm_area_struct *vma = vmf->vma; 4682 bool write = vmf->flags & FAULT_FLAG_WRITE; 4683 bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE); 4684 pte_t entry; 4685 4686 flush_icache_pages(vma, page, nr); 4687 entry = mk_pte(page, vma->vm_page_prot); 4688 4689 if (prefault && arch_wants_old_prefaulted_pte()) 4690 entry = pte_mkold(entry); 4691 else 4692 entry = pte_sw_mkyoung(entry); 4693 4694 if (write) 4695 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 4696 if (unlikely(vmf_orig_pte_uffd_wp(vmf))) 4697 entry = pte_mkuffd_wp(entry); 4698 /* copy-on-write page */ 4699 if (write && !(vma->vm_flags & VM_SHARED)) { 4700 VM_BUG_ON_FOLIO(nr != 1, folio); 4701 folio_add_new_anon_rmap(folio, vma, addr); 4702 folio_add_lru_vma(folio, vma); 4703 } else { 4704 folio_add_file_rmap_ptes(folio, page, nr, vma); 4705 } 4706 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); 4707 4708 /* no need to invalidate: a not-present page won't be cached */ 4709 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); 4710 } 4711 4712 static bool vmf_pte_changed(struct vm_fault *vmf) 4713 { 4714 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) 4715 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); 4716 4717 return !pte_none(ptep_get(vmf->pte)); 4718 } 4719 4720 /** 4721 * finish_fault - finish page fault once we have prepared the page to fault 4722 * 4723 * @vmf: structure describing the fault 4724 * 4725 * This function handles all that is needed to finish a page fault once the 4726 * page to fault in is prepared. It handles locking of PTEs, inserts the PTE for the 4727 * given page, adds reverse page mapping, handles memcg charges and LRU 4728 * addition. 4729 * 4730 * The function expects the page to be locked and on success it consumes a 4731 * reference on the page being mapped (for the PTE which maps it). 4732 * 4733 * Return: %0 on success, %VM_FAULT_ code in case of error.
4734 */ 4735 vm_fault_t finish_fault(struct vm_fault *vmf) 4736 { 4737 struct vm_area_struct *vma = vmf->vma; 4738 struct page *page; 4739 vm_fault_t ret; 4740 bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && 4741 !(vma->vm_flags & VM_SHARED); 4742 4743 /* Did we COW the page? */ 4744 if (is_cow) 4745 page = vmf->cow_page; 4746 else 4747 page = vmf->page; 4748 4749 /* 4750 * check even for read faults because we might have lost our CoWed 4751 * page 4752 */ 4753 if (!(vma->vm_flags & VM_SHARED)) { 4754 ret = check_stable_address_space(vma->vm_mm); 4755 if (ret) 4756 return ret; 4757 } 4758 4759 if (pmd_none(*vmf->pmd)) { 4760 if (PageTransCompound(page)) { 4761 ret = do_set_pmd(vmf, page); 4762 if (ret != VM_FAULT_FALLBACK) 4763 return ret; 4764 } 4765 4766 if (vmf->prealloc_pte) 4767 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); 4768 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) 4769 return VM_FAULT_OOM; 4770 } 4771 4772 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4773 vmf->address, &vmf->ptl); 4774 if (!vmf->pte) 4775 return VM_FAULT_NOPAGE; 4776 4777 /* Re-check under ptl */ 4778 if (likely(!vmf_pte_changed(vmf))) { 4779 struct folio *folio = page_folio(page); 4780 int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio); 4781 4782 set_pte_range(vmf, folio, page, 1, vmf->address); 4783 add_mm_counter(vma->vm_mm, type, 1); 4784 ret = 0; 4785 } else { 4786 update_mmu_tlb(vma, vmf->address, vmf->pte); 4787 ret = VM_FAULT_NOPAGE; 4788 } 4789 4790 pte_unmap_unlock(vmf->pte, vmf->ptl); 4791 return ret; 4792 } 4793 4794 static unsigned long fault_around_pages __read_mostly = 4795 65536 >> PAGE_SHIFT; 4796 4797 #ifdef CONFIG_DEBUG_FS 4798 static int fault_around_bytes_get(void *data, u64 *val) 4799 { 4800 *val = fault_around_pages << PAGE_SHIFT; 4801 return 0; 4802 } 4803 4804 /* 4805 * fault_around_bytes must be rounded down to the nearest page order as it's 4806 * what do_fault_around() expects to see. 4807 */ 4808 static int fault_around_bytes_set(void *data, u64 val) 4809 { 4810 if (val / PAGE_SIZE > PTRS_PER_PTE) 4811 return -EINVAL; 4812 4813 /* 4814 * The minimum value is 1 page, however this results in no fault-around 4815 * at all. See should_fault_around(). 4816 */ 4817 val = max(val, PAGE_SIZE); 4818 fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT; 4819 4820 return 0; 4821 } 4822 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, 4823 fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); 4824 4825 static int __init fault_around_debugfs(void) 4826 { 4827 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, 4828 &fault_around_bytes_fops); 4829 return 0; 4830 } 4831 late_initcall(fault_around_debugfs); 4832 #endif 4833 4834 /* 4835 * do_fault_around() tries to map few pages around the fault address. The hope 4836 * is that the pages will be needed soon and this will lower the number of 4837 * faults to handle. 4838 * 4839 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's 4840 * not ready to be mapped: not up-to-date, locked, etc. 4841 * 4842 * This function doesn't cross VMA or page table boundaries, in order to call 4843 * map_pages() and acquire a PTE lock only once. 4844 * 4845 * fault_around_pages defines how many pages we'll try to map. 4846 * do_fault_around() expects it to be set to a power of two less than or equal 4847 * to PTRS_PER_PTE. 
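*
* A worked example (illustrative, assuming 4K pages): the default of
* 65536 bytes gives fault_around_pages == 16, so a read fault tries to
* map the ready pages in the surrounding 64K-aligned window, clamped
* to the VMA and to the page table as described below.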
4848 * 4849 * The virtual address of the area that we map is naturally aligned to 4850 * fault_around_pages * PAGE_SIZE rounded down to the machine page size 4851 * (and therefore to page order). This way it's easier to guarantee 4852 * that we don't cross page table boundaries. 4853 */ 4854 static vm_fault_t do_fault_around(struct vm_fault *vmf) 4855 { 4856 pgoff_t nr_pages = READ_ONCE(fault_around_pages); 4857 pgoff_t pte_off = pte_index(vmf->address); 4858 /* The page offset of vmf->address within the VMA. */ 4859 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; 4860 pgoff_t from_pte, to_pte; 4861 vm_fault_t ret; 4862 4863 /* The PTE offset of the start address, clamped to the VMA. */ 4864 from_pte = max(ALIGN_DOWN(pte_off, nr_pages), 4865 pte_off - min(pte_off, vma_off)); 4866 4867 /* The PTE offset of the end address, clamped to the VMA and PTE. */ 4868 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE, 4869 pte_off + vma_pages(vmf->vma) - vma_off) - 1; 4870 4871 if (pmd_none(*vmf->pmd)) { 4872 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); 4873 if (!vmf->prealloc_pte) 4874 return VM_FAULT_OOM; 4875 } 4876 4877 rcu_read_lock(); 4878 ret = vmf->vma->vm_ops->map_pages(vmf, 4879 vmf->pgoff + from_pte - pte_off, 4880 vmf->pgoff + to_pte - pte_off); 4881 rcu_read_unlock(); 4882 4883 return ret; 4884 } 4885 4886 /* Return true if we should do read fault-around, false otherwise */ 4887 static inline bool should_fault_around(struct vm_fault *vmf) 4888 { 4889 /* No ->map_pages? No way to fault around... */ 4890 if (!vmf->vma->vm_ops->map_pages) 4891 return false; 4892 4893 if (uffd_disable_fault_around(vmf->vma)) 4894 return false; 4895 4896 /* A single page implies no faulting 'around' at all. */ 4897 return fault_around_pages > 1; 4898 } 4899 4900 static vm_fault_t do_read_fault(struct vm_fault *vmf) 4901 { 4902 vm_fault_t ret = 0; 4903 struct folio *folio; 4904 4905 /* 4906 * Let's call ->map_pages() first and use ->fault() as fallback 4907 * if page by the offset is not ready to be mapped (cold cache or 4908 * something). 
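* If ->map_pages() managed to map the faulting page it reports
* VM_FAULT_NOPAGE and we are done; a zero return means that page was
* not ready, and we fall through to the full ->fault() path.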
4909 */ 4910 if (should_fault_around(vmf)) { 4911 ret = do_fault_around(vmf); 4912 if (ret) 4913 return ret; 4914 } 4915 4916 ret = vmf_can_call_fault(vmf); 4917 if (ret) 4918 return ret; 4919 4920 ret = __do_fault(vmf); 4921 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4922 return ret; 4923 4924 ret |= finish_fault(vmf); 4925 folio = page_folio(vmf->page); 4926 folio_unlock(folio); 4927 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4928 folio_put(folio); 4929 return ret; 4930 } 4931 4932 static vm_fault_t do_cow_fault(struct vm_fault *vmf) 4933 { 4934 struct vm_area_struct *vma = vmf->vma; 4935 struct folio *folio; 4936 vm_fault_t ret; 4937 4938 ret = vmf_can_call_fault(vmf); 4939 if (!ret) 4940 ret = vmf_anon_prepare(vmf); 4941 if (ret) 4942 return ret; 4943 4944 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); 4945 if (!folio) 4946 return VM_FAULT_OOM; 4947 4948 vmf->cow_page = &folio->page; 4949 4950 ret = __do_fault(vmf); 4951 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4952 goto uncharge_out; 4953 if (ret & VM_FAULT_DONE_COW) 4954 return ret; 4955 4956 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); 4957 __folio_mark_uptodate(folio); 4958 4959 ret |= finish_fault(vmf); 4960 unlock_page(vmf->page); 4961 put_page(vmf->page); 4962 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4963 goto uncharge_out; 4964 return ret; 4965 uncharge_out: 4966 folio_put(folio); 4967 return ret; 4968 } 4969 4970 static vm_fault_t do_shared_fault(struct vm_fault *vmf) 4971 { 4972 struct vm_area_struct *vma = vmf->vma; 4973 vm_fault_t ret, tmp; 4974 struct folio *folio; 4975 4976 ret = vmf_can_call_fault(vmf); 4977 if (ret) 4978 return ret; 4979 4980 ret = __do_fault(vmf); 4981 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4982 return ret; 4983 4984 folio = page_folio(vmf->page); 4985 4986 /* 4987 * Check if the backing address space wants to know that the page is 4988 * about to become writable 4989 */ 4990 if (vma->vm_ops->page_mkwrite) { 4991 folio_unlock(folio); 4992 tmp = do_page_mkwrite(vmf, folio); 4993 if (unlikely(!tmp || 4994 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 4995 folio_put(folio); 4996 return tmp; 4997 } 4998 } 4999 5000 ret |= finish_fault(vmf); 5001 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | 5002 VM_FAULT_RETRY))) { 5003 folio_unlock(folio); 5004 folio_put(folio); 5005 return ret; 5006 } 5007 5008 ret |= fault_dirty_shared_page(vmf); 5009 return ret; 5010 } 5011 5012 /* 5013 * We enter with non-exclusive mmap_lock (to exclude vma changes, 5014 * but allow concurrent faults). 5015 * The mmap_lock may have been released depending on flags and our 5016 * return value. See filemap_fault() and __folio_lock_or_retry(). 5017 * If mmap_lock is released, vma may become invalid (for example 5018 * by other thread calling munmap()). 5019 */ 5020 static vm_fault_t do_fault(struct vm_fault *vmf) 5021 { 5022 struct vm_area_struct *vma = vmf->vma; 5023 struct mm_struct *vm_mm = vma->vm_mm; 5024 vm_fault_t ret; 5025 5026 /* 5027 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND 5028 */ 5029 if (!vma->vm_ops->fault) { 5030 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 5031 vmf->address, &vmf->ptl); 5032 if (unlikely(!vmf->pte)) 5033 ret = VM_FAULT_SIGBUS; 5034 else { 5035 /* 5036 * Make sure this is not a temporary clearing of pte 5037 * by holding ptl and checking again. 
A R/M/W update 5038 of the pte involves: taking the ptl, clearing the pte (so that 5039 we don't race with concurrent modification by hardware), 5040 and then writing the update. 5041 */ 5042 if (unlikely(pte_none(ptep_get(vmf->pte)))) 5043 ret = VM_FAULT_SIGBUS; 5044 else 5045 ret = VM_FAULT_NOPAGE; 5046 5047 pte_unmap_unlock(vmf->pte, vmf->ptl); 5048 } 5049 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) 5050 ret = do_read_fault(vmf); 5051 else if (!(vma->vm_flags & VM_SHARED)) 5052 ret = do_cow_fault(vmf); 5053 else 5054 ret = do_shared_fault(vmf); 5055 5056 /* preallocated pagetable is unused: free it */ 5057 if (vmf->prealloc_pte) { 5058 pte_free(vm_mm, vmf->prealloc_pte); 5059 vmf->prealloc_pte = NULL; 5060 } 5061 return ret; 5062 } 5063 5064 int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf, 5065 unsigned long addr, int page_nid, int *flags) 5066 { 5067 struct vm_area_struct *vma = vmf->vma; 5068 5069 folio_get(folio); 5070 5071 /* Record the current PID accessing the VMA */ 5072 vma_set_access_pid_bit(vma); 5073 5074 count_vm_numa_event(NUMA_HINT_FAULTS); 5075 if (page_nid == numa_node_id()) { 5076 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 5077 *flags |= TNF_FAULT_LOCAL; 5078 } 5079 5080 return mpol_misplaced(folio, vmf, addr); 5081 } 5082 5083 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, 5084 unsigned long fault_addr, pte_t *fault_pte, 5085 bool writable) 5086 { 5087 pte_t pte, old_pte; 5088 5089 old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); 5090 pte = pte_modify(old_pte, vma->vm_page_prot); 5091 pte = pte_mkyoung(pte); 5092 if (writable) 5093 pte = pte_mkwrite(pte, vma); 5094 ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); 5095 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); 5096 } 5097 5098 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, 5099 struct folio *folio, pte_t fault_pte, 5100 bool ignore_writable, bool pte_write_upgrade) 5101 { 5102 int nr = pte_pfn(fault_pte) - folio_pfn(folio); 5103 unsigned long start, end, addr = vmf->address; 5104 unsigned long addr_start = addr - (nr << PAGE_SHIFT); 5105 unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE); 5106 pte_t *start_ptep; 5107 5108 /* Stay within the VMA and within the page table.
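* In other words, the window restored below is the intersection of
* the folio's mapped range, the PMD-sized page table, and the VMA.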
*/ 5109 start = max3(addr_start, pt_start, vma->vm_start); 5110 end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE, 5111 vma->vm_end); 5112 start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); 5113 5114 /* Restore all PTEs' mapping of the large folio */ 5115 for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) { 5116 pte_t ptent = ptep_get(start_ptep); 5117 bool writable = false; 5118 5119 if (!pte_present(ptent) || !pte_protnone(ptent)) 5120 continue; 5121 5122 if (pfn_folio(pte_pfn(ptent)) != folio) 5123 continue; 5124 5125 if (!ignore_writable) { 5126 ptent = pte_modify(ptent, vma->vm_page_prot); 5127 writable = pte_write(ptent); 5128 if (!writable && pte_write_upgrade && 5129 can_change_pte_writable(vma, addr, ptent)) 5130 writable = true; 5131 } 5132 5133 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); 5134 } 5135 } 5136 5137 static vm_fault_t do_numa_page(struct vm_fault *vmf) 5138 { 5139 struct vm_area_struct *vma = vmf->vma; 5140 struct folio *folio = NULL; 5141 int nid = NUMA_NO_NODE; 5142 bool writable = false, ignore_writable = false; 5143 bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); 5144 int last_cpupid; 5145 int target_nid; 5146 pte_t pte, old_pte; 5147 int flags = 0, nr_pages; 5148 5149 /* 5150 * The pte cannot be used safely until we verify, while holding the page 5151 * table lock, that its contents have not changed during fault handling. 5152 */ 5153 spin_lock(vmf->ptl); 5154 /* Read the live PTE from the page tables: */ 5155 old_pte = ptep_get(vmf->pte); 5156 5157 if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { 5158 pte_unmap_unlock(vmf->pte, vmf->ptl); 5159 goto out; 5160 } 5161 5162 pte = pte_modify(old_pte, vma->vm_page_prot); 5163 5164 /* 5165 * Detect now whether the PTE could be writable; this information 5166 * is only valid while holding the PT lock. 5167 */ 5168 writable = pte_write(pte); 5169 if (!writable && pte_write_upgrade && 5170 can_change_pte_writable(vma, vmf->address, pte)) 5171 writable = true; 5172 5173 folio = vm_normal_folio(vma, vmf->address, pte); 5174 if (!folio || folio_is_zone_device(folio)) 5175 goto out_map; 5176 5177 /* 5178 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as 5179 * much anyway since they can be in shared cache state. This misses 5180 * the case where a mapping is writable but the process never writes 5181 * to it but pte_write gets cleared during protection updates and 5182 * pte_dirty has unpredictable behaviour between PTE scan updates, 5183 * background writeback, dirty balancing and application behaviour. 5184 */ 5185 if (!writable) 5186 flags |= TNF_NO_GROUP; 5187 5188 /* 5189 * Flag if the folio is shared between multiple address spaces. This 5190 * is later used when determining whether to group tasks together 5191 */ 5192 if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) 5193 flags |= TNF_SHARED; 5194 5195 nid = folio_nid(folio); 5196 nr_pages = folio_nr_pages(folio); 5197 /* 5198 * For memory tiering mode, cpupid of slow memory page is used 5199 * to record page access time. So use default value. 
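* ((-1 & LAST_CPUPID_MASK) is the "no cpupid" sentinel, so the access
* time stored in this field is not misread as a real CPU/PID by the
* grouping heuristics.)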
5200 */ 5201 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 5202 !node_is_toptier(nid)) 5203 last_cpupid = (-1 & LAST_CPUPID_MASK); 5204 else 5205 last_cpupid = folio_last_cpupid(folio); 5206 target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags); 5207 if (target_nid == NUMA_NO_NODE) { 5208 folio_put(folio); 5209 goto out_map; 5210 } 5211 pte_unmap_unlock(vmf->pte, vmf->ptl); 5212 writable = false; 5213 ignore_writable = true; 5214 5215 /* Migrate to the requested node */ 5216 if (migrate_misplaced_folio(folio, vma, target_nid)) { 5217 nid = target_nid; 5218 flags |= TNF_MIGRATED; 5219 } else { 5220 flags |= TNF_MIGRATE_FAIL; 5221 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 5222 vmf->address, &vmf->ptl); 5223 if (unlikely(!vmf->pte)) 5224 goto out; 5225 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 5226 pte_unmap_unlock(vmf->pte, vmf->ptl); 5227 goto out; 5228 } 5229 goto out_map; 5230 } 5231 5232 out: 5233 if (nid != NUMA_NO_NODE) 5234 task_numa_fault(last_cpupid, nid, nr_pages, flags); 5235 return 0; 5236 out_map: 5237 /* 5238 * Make it present again, depending on how arch implements 5239 * non-accessible ptes, some can allow access by kernel mode. 5240 */ 5241 if (folio && folio_test_large(folio)) 5242 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, 5243 pte_write_upgrade); 5244 else 5245 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, 5246 writable); 5247 pte_unmap_unlock(vmf->pte, vmf->ptl); 5248 goto out; 5249 } 5250 5251 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) 5252 { 5253 struct vm_area_struct *vma = vmf->vma; 5254 if (vma_is_anonymous(vma)) 5255 return do_huge_pmd_anonymous_page(vmf); 5256 if (vma->vm_ops->huge_fault) 5257 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); 5258 return VM_FAULT_FALLBACK; 5259 } 5260 5261 /* `inline' is required to avoid gcc 4.1.2 build error */ 5262 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) 5263 { 5264 struct vm_area_struct *vma = vmf->vma; 5265 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 5266 vm_fault_t ret; 5267 5268 if (vma_is_anonymous(vma)) { 5269 if (likely(!unshare) && 5270 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { 5271 if (userfaultfd_wp_async(vmf->vma)) 5272 goto split; 5273 return handle_userfault(vmf, VM_UFFD_WP); 5274 } 5275 return do_huge_pmd_wp_page(vmf); 5276 } 5277 5278 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 5279 if (vma->vm_ops->huge_fault) { 5280 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); 5281 if (!(ret & VM_FAULT_FALLBACK)) 5282 return ret; 5283 } 5284 } 5285 5286 split: 5287 /* COW or write-notify handled on pte level: split pmd. 
*/ 5288 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 5289 5290 return VM_FAULT_FALLBACK; 5291 } 5292 5293 static vm_fault_t create_huge_pud(struct vm_fault *vmf) 5294 { 5295 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 5296 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 5297 struct vm_area_struct *vma = vmf->vma; 5298 /* No support for anonymous transparent PUD pages yet */ 5299 if (vma_is_anonymous(vma)) 5300 return VM_FAULT_FALLBACK; 5301 if (vma->vm_ops->huge_fault) 5302 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); 5303 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 5304 return VM_FAULT_FALLBACK; 5305 } 5306 5307 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) 5308 { 5309 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 5310 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 5311 struct vm_area_struct *vma = vmf->vma; 5312 vm_fault_t ret; 5313 5314 /* No support for anonymous transparent PUD pages yet */ 5315 if (vma_is_anonymous(vma)) 5316 goto split; 5317 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 5318 if (vma->vm_ops->huge_fault) { 5319 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); 5320 if (!(ret & VM_FAULT_FALLBACK)) 5321 return ret; 5322 } 5323 } 5324 split: 5325 /* COW or write-notify not handled on PUD level: split pud.*/ 5326 __split_huge_pud(vma, vmf->pud, vmf->address); 5327 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 5328 return VM_FAULT_FALLBACK; 5329 } 5330 5331 /* 5332 * These routines also need to handle stuff like marking pages dirty 5333 * and/or accessed for architectures that don't do it in hardware (most 5334 * RISC architectures). The early dirtying is also good on the i386. 5335 * 5336 * There is also a hook called "update_mmu_cache()" that architectures 5337 * with external mmu caches can use to update those (ie the Sparc or 5338 * PowerPC hashed page tables that act as extended TLBs). 5339 * 5340 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow 5341 * concurrent faults). 5342 * 5343 * The mmap_lock may have been released depending on flags and our return value. 5344 * See filemap_fault() and __folio_lock_or_retry(). 5345 */ 5346 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) 5347 { 5348 pte_t entry; 5349 5350 if (unlikely(pmd_none(*vmf->pmd))) { 5351 /* 5352 * Leave __pte_alloc() until later: because vm_ops->fault may 5353 * want to allocate huge page, and if we expose page table 5354 * for an instant, it will be difficult to retract from 5355 * concurrent faults and from rmap lookups. 5356 */ 5357 vmf->pte = NULL; 5358 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; 5359 } else { 5360 /* 5361 * A regular pmd is established and it can't morph into a huge 5362 * pmd by anon khugepaged, since that takes mmap_lock in write 5363 * mode; but shmem or file collapse to THP could still morph 5364 * it into a huge pmd: just retry later if so. 
5365 */ 5366 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, 5367 vmf->address, &vmf->ptl); 5368 if (unlikely(!vmf->pte)) 5369 return 0; 5370 vmf->orig_pte = ptep_get_lockless(vmf->pte); 5371 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; 5372 5373 if (pte_none(vmf->orig_pte)) { 5374 pte_unmap(vmf->pte); 5375 vmf->pte = NULL; 5376 } 5377 } 5378 5379 if (!vmf->pte) 5380 return do_pte_missing(vmf); 5381 5382 if (!pte_present(vmf->orig_pte)) 5383 return do_swap_page(vmf); 5384 5385 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) 5386 return do_numa_page(vmf); 5387 5388 spin_lock(vmf->ptl); 5389 entry = vmf->orig_pte; 5390 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { 5391 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 5392 goto unlock; 5393 } 5394 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 5395 if (!pte_write(entry)) 5396 return do_wp_page(vmf); 5397 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) 5398 entry = pte_mkdirty(entry); 5399 } 5400 entry = pte_mkyoung(entry); 5401 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, 5402 vmf->flags & FAULT_FLAG_WRITE)) { 5403 update_mmu_cache_range(vmf, vmf->vma, vmf->address, 5404 vmf->pte, 1); 5405 } else { 5406 /* Skip spurious TLB flush for retried page fault */ 5407 if (vmf->flags & FAULT_FLAG_TRIED) 5408 goto unlock; 5409 /* 5410 * This is needed only for protection faults but the arch code 5411 * is not yet telling us if this is a protection fault or not. 5412 * This still avoids useless tlb flushes for .text page faults 5413 * with threads. 5414 */ 5415 if (vmf->flags & FAULT_FLAG_WRITE) 5416 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, 5417 vmf->pte); 5418 } 5419 unlock: 5420 pte_unmap_unlock(vmf->pte, vmf->ptl); 5421 return 0; 5422 } 5423 5424 /* 5425 * On entry, we hold either the VMA lock or the mmap_lock 5426 * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in 5427 * the result, the mmap_lock is not held on exit. See filemap_fault() 5428 * and __folio_lock_or_retry(). 5429 */ 5430 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, 5431 unsigned long address, unsigned int flags) 5432 { 5433 struct vm_fault vmf = { 5434 .vma = vma, 5435 .address = address & PAGE_MASK, 5436 .real_address = address, 5437 .flags = flags, 5438 .pgoff = linear_page_index(vma, address), 5439 .gfp_mask = __get_fault_gfp_mask(vma), 5440 }; 5441 struct mm_struct *mm = vma->vm_mm; 5442 unsigned long vm_flags = vma->vm_flags; 5443 pgd_t *pgd; 5444 p4d_t *p4d; 5445 vm_fault_t ret; 5446 5447 pgd = pgd_offset(mm, address); 5448 p4d = p4d_alloc(mm, pgd, address); 5449 if (!p4d) 5450 return VM_FAULT_OOM; 5451 5452 vmf.pud = pud_alloc(mm, p4d, address); 5453 if (!vmf.pud) 5454 return VM_FAULT_OOM; 5455 retry_pud: 5456 if (pud_none(*vmf.pud) && 5457 thp_vma_allowable_order(vma, vm_flags, 5458 TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) { 5459 ret = create_huge_pud(&vmf); 5460 if (!(ret & VM_FAULT_FALLBACK)) 5461 return ret; 5462 } else { 5463 pud_t orig_pud = *vmf.pud; 5464 5465 barrier(); 5466 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { 5467 5468 /* 5469 * TODO once we support anonymous PUDs: NUMA case and 5470 * FAULT_FLAG_UNSHARE handling. 
5471 */ 5472 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { 5473 ret = wp_huge_pud(&vmf, orig_pud); 5474 if (!(ret & VM_FAULT_FALLBACK)) 5475 return ret; 5476 } else { 5477 huge_pud_set_accessed(&vmf, orig_pud); 5478 return 0; 5479 } 5480 } 5481 } 5482 5483 vmf.pmd = pmd_alloc(mm, vmf.pud, address); 5484 if (!vmf.pmd) 5485 return VM_FAULT_OOM; 5486 5487 /* Huge pud page fault raced with pmd_alloc? */ 5488 if (pud_trans_unstable(vmf.pud)) 5489 goto retry_pud; 5490 5491 if (pmd_none(*vmf.pmd) && 5492 thp_vma_allowable_order(vma, vm_flags, 5493 TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) { 5494 ret = create_huge_pmd(&vmf); 5495 if (!(ret & VM_FAULT_FALLBACK)) 5496 return ret; 5497 } else { 5498 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); 5499 5500 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { 5501 VM_BUG_ON(thp_migration_supported() && 5502 !is_pmd_migration_entry(vmf.orig_pmd)); 5503 if (is_pmd_migration_entry(vmf.orig_pmd)) 5504 pmd_migration_entry_wait(mm, vmf.pmd); 5505 return 0; 5506 } 5507 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { 5508 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) 5509 return do_huge_pmd_numa_page(&vmf); 5510 5511 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 5512 !pmd_write(vmf.orig_pmd)) { 5513 ret = wp_huge_pmd(&vmf); 5514 if (!(ret & VM_FAULT_FALLBACK)) 5515 return ret; 5516 } else { 5517 huge_pmd_set_accessed(&vmf); 5518 return 0; 5519 } 5520 } 5521 } 5522 5523 return handle_pte_fault(&vmf); 5524 } 5525 5526 /** 5527 * mm_account_fault - Do page fault accounting 5528 * @mm: mm from which memcg should be extracted. It can be NULL. 5529 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting 5530 * of perf event counters, but we'll still do the per-task accounting to 5531 * the task who triggered this page fault. 5532 * @address: the faulted address. 5533 * @flags: the fault flags. 5534 * @ret: the fault retcode. 5535 * 5536 * This will take care of most of the page fault accounting. Meanwhile, it 5537 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter 5538 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should 5539 * still be in per-arch page fault handlers at the entry of page fault. 5540 */ 5541 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, 5542 unsigned long address, unsigned int flags, 5543 vm_fault_t ret) 5544 { 5545 bool major; 5546 5547 /* Incomplete faults will be accounted upon completion. */ 5548 if (ret & VM_FAULT_RETRY) 5549 return; 5550 5551 /* 5552 * To preserve the behavior of older kernels, PGFAULT counters record 5553 * both successful and failed faults, as opposed to perf counters, 5554 * which ignore failed cases. 5555 */ 5556 count_vm_event(PGFAULT); 5557 count_memcg_event_mm(mm, PGFAULT); 5558 5559 /* 5560 * Do not account for unsuccessful faults (e.g. when the address wasn't 5561 * valid). That includes arch_vma_access_permitted() failing before 5562 * reaching here. So this is not a "this many hardware page faults" 5563 * counter. We should use the hw profiling for that. 5564 */ 5565 if (ret & VM_FAULT_ERROR) 5566 return; 5567 5568 /* 5569 * We define the fault as a major fault when the final successful fault 5570 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't 5571 * handle it immediately previously). 
	 */
	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

	if (major)
		current->maj_flt++;
	else
		current->min_flt++;

	/*
	 * If the fault is done for GUP, regs will be NULL. We only do the
	 * accounting for the per-thread fault counters of the task that
	 * triggered the fault, and we skip the perf event updates.
	 */
	if (!regs)
		return;

	if (major)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}

#ifdef CONFIG_LRU_GEN
static void lru_gen_enter_fault(struct vm_area_struct *vma)
{
	/* the LRU algorithm only applies to accesses with recency */
	current->in_lru_fault = vma_has_recency(vma);
}

static void lru_gen_exit_fault(void)
{
	current->in_lru_fault = false;
}
#else
static void lru_gen_enter_fault(struct vm_area_struct *vma)
{
}

static void lru_gen_exit_fault(void)
{
}
#endif /* CONFIG_LRU_GEN */

static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
				       unsigned int *flags)
{
	if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
		if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
			return VM_FAULT_SIGSEGV;
		/*
		 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
		 * just treat it like an ordinary read-fault otherwise.
		 */
		if (!is_cow_mapping(vma->vm_flags))
			*flags &= ~FAULT_FLAG_UNSHARE;
	} else if (*flags & FAULT_FLAG_WRITE) {
		/* Write faults on read-only mappings are impossible ... */
		if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
			return VM_FAULT_SIGSEGV;
		/* ... and FOLL_FORCE only applies to COW mappings. */
		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
				 !is_cow_mapping(vma->vm_flags)))
			return VM_FAULT_SIGSEGV;
	}
#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
	 * the assumption that the lock is dropped on VM_FAULT_RETRY.
	 */
	if (WARN_ON_ONCE((*flags &
			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
		return VM_FAULT_SIGSEGV;
#endif

	return 0;
}

/*
 * By the time we get here, we already hold the mm semaphore.
 *
 * The mmap_lock may have been released depending on flags and our
 * return value. See filemap_fault() and __folio_lock_or_retry().
 */
vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
			   unsigned int flags, struct pt_regs *regs)
{
	/* If the fault handler drops the mmap_lock, vma may be freed */
	struct mm_struct *mm = vma->vm_mm;
	vm_fault_t ret;

	__set_current_state(TASK_RUNNING);

	ret = sanitize_fault_flags(vma, &flags);
	if (ret)
		goto out;

	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
				       flags & FAULT_FLAG_INSTRUCTION,
				       flags & FAULT_FLAG_REMOTE)) {
		ret = VM_FAULT_SIGSEGV;
		goto out;
	}

	/*
	 * Enable the memcg OOM handling for faults triggered in user
	 * space. Kernel faults are handled more gracefully.
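	 *
	 * (Without this, a memcg charge failure in kernel context simply
	 * returns -ENOMEM to the caller rather than invoking the memcg OOM
	 * killer; see the mem_cgroup_oom_synchronize() call below.)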
	 */
	if (flags & FAULT_FLAG_USER)
		mem_cgroup_enter_user_fault();

	lru_gen_enter_fault(vma);

	if (unlikely(is_vm_hugetlb_page(vma)))
		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
	else
		ret = __handle_mm_fault(vma, address, flags);

	lru_gen_exit_fault();

	if (flags & FAULT_FLAG_USER) {
		mem_cgroup_exit_user_fault();
		/*
		 * The task may have entered a memcg OOM situation but
		 * if the allocation error was handled gracefully (no
		 * VM_FAULT_OOM), there is no need to kill anything.
		 * Just clean up the OOM state peacefully.
		 */
		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
			mem_cgroup_oom_synchronize(false);
	}
out:
	mm_account_fault(mm, regs, address, flags, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(handle_mm_fault);

#ifdef CONFIG_LOCK_MM_AND_FIND_VMA
#include <linux/extable.h>

static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
{
	if (likely(mmap_read_trylock(mm)))
		return true;

	if (regs && !user_mode(regs)) {
		unsigned long ip = exception_ip(regs);
		if (!search_exception_tables(ip))
			return false;
	}

	return !mmap_read_lock_killable(mm);
}

static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
{
	/*
	 * We don't have this operation yet.
	 *
	 * It should be easy enough to do: it's basically an
	 * atomic_long_try_cmpxchg_acquire()
	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
	 * it also needs the proper lockdep magic etc.
	 */
	return false;
}

static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
{
	mmap_read_unlock(mm);
	if (regs && !user_mode(regs)) {
		unsigned long ip = exception_ip(regs);
		if (!search_exception_tables(ip))
			return false;
	}
	return !mmap_write_lock_killable(mm);
}

/*
 * Helper for page fault handling.
 *
 * This is kind of equivalent to "mmap_read_lock()" followed
 * by "find_extend_vma()", except it's a lot more careful about
 * the locking (and will drop the lock on failure).
 *
 * For example, if we have a kernel bug that causes a page
 * fault, we don't want to just use mmap_read_lock() to get
 * the mm lock, because that would deadlock if the bug were
 * to happen while we're holding the mm lock for writing.
 *
 * So this checks the exception tables on kernel faults so
 * that we only do all this for instructions that are actually
 * expected to fault.
 *
 * We can also actually take the mm lock for writing if we
 * need to extend the vma, which helps the VM layer a lot.
 */
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
			unsigned long addr, struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	if (!get_mmap_lock_carefully(mm, regs))
		return NULL;

	vma = find_vma(mm, addr);
	if (likely(vma && (vma->vm_start <= addr)))
		return vma;

	/*
	 * Well, dang. We might still be successful, but only
	 * if we can extend a vma to do so.
	 */
	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
		mmap_read_unlock(mm);
		return NULL;
	}

	/*
	 * We can try to upgrade the mmap lock atomically,
	 * in which case we can continue to use the vma
	 * we already looked up.
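	 * (mmap_upgrade_trylock() above is currently a stub that always
	 * fails, so in practice we always fall back to dropping and
	 * re-taking the lock.)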
5795 * 5796 * Otherwise we'll have to drop the mmap lock and 5797 * re-take it, and also look up the vma again, 5798 * re-checking it. 5799 */ 5800 if (!mmap_upgrade_trylock(mm)) { 5801 if (!upgrade_mmap_lock_carefully(mm, regs)) 5802 return NULL; 5803 5804 vma = find_vma(mm, addr); 5805 if (!vma) 5806 goto fail; 5807 if (vma->vm_start <= addr) 5808 goto success; 5809 if (!(vma->vm_flags & VM_GROWSDOWN)) 5810 goto fail; 5811 } 5812 5813 if (expand_stack_locked(vma, addr)) 5814 goto fail; 5815 5816 success: 5817 mmap_write_downgrade(mm); 5818 return vma; 5819 5820 fail: 5821 mmap_write_unlock(mm); 5822 return NULL; 5823 } 5824 #endif 5825 5826 #ifdef CONFIG_PER_VMA_LOCK 5827 /* 5828 * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be 5829 * stable and not isolated. If the VMA is not found or is being modified the 5830 * function returns NULL. 5831 */ 5832 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, 5833 unsigned long address) 5834 { 5835 MA_STATE(mas, &mm->mm_mt, address, address); 5836 struct vm_area_struct *vma; 5837 5838 rcu_read_lock(); 5839 retry: 5840 vma = mas_walk(&mas); 5841 if (!vma) 5842 goto inval; 5843 5844 if (!vma_start_read(vma)) 5845 goto inval; 5846 5847 /* Check since vm_start/vm_end might change before we lock the VMA */ 5848 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 5849 goto inval_end_read; 5850 5851 /* Check if the VMA got isolated after we found it */ 5852 if (vma->detached) { 5853 vma_end_read(vma); 5854 count_vm_vma_lock_event(VMA_LOCK_MISS); 5855 /* The area was replaced with another one */ 5856 goto retry; 5857 } 5858 5859 rcu_read_unlock(); 5860 return vma; 5861 5862 inval_end_read: 5863 vma_end_read(vma); 5864 inval: 5865 rcu_read_unlock(); 5866 count_vm_vma_lock_event(VMA_LOCK_ABORT); 5867 return NULL; 5868 } 5869 #endif /* CONFIG_PER_VMA_LOCK */ 5870 5871 #ifndef __PAGETABLE_P4D_FOLDED 5872 /* 5873 * Allocate p4d page table. 5874 * We've already handled the fast-path in-line. 5875 */ 5876 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 5877 { 5878 p4d_t *new = p4d_alloc_one(mm, address); 5879 if (!new) 5880 return -ENOMEM; 5881 5882 spin_lock(&mm->page_table_lock); 5883 if (pgd_present(*pgd)) { /* Another has populated it */ 5884 p4d_free(mm, new); 5885 } else { 5886 smp_wmb(); /* See comment in pmd_install() */ 5887 pgd_populate(mm, pgd, new); 5888 } 5889 spin_unlock(&mm->page_table_lock); 5890 return 0; 5891 } 5892 #endif /* __PAGETABLE_P4D_FOLDED */ 5893 5894 #ifndef __PAGETABLE_PUD_FOLDED 5895 /* 5896 * Allocate page upper directory. 5897 * We've already handled the fast-path in-line. 5898 */ 5899 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) 5900 { 5901 pud_t *new = pud_alloc_one(mm, address); 5902 if (!new) 5903 return -ENOMEM; 5904 5905 spin_lock(&mm->page_table_lock); 5906 if (!p4d_present(*p4d)) { 5907 mm_inc_nr_puds(mm); 5908 smp_wmb(); /* See comment in pmd_install() */ 5909 p4d_populate(mm, p4d, new); 5910 } else /* Another has populated it */ 5911 pud_free(mm, new); 5912 spin_unlock(&mm->page_table_lock); 5913 return 0; 5914 } 5915 #endif /* __PAGETABLE_PUD_FOLDED */ 5916 5917 #ifndef __PAGETABLE_PMD_FOLDED 5918 /* 5919 * Allocate page middle directory. 5920 * We've already handled the fast-path in-line. 
5921 */ 5922 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 5923 { 5924 spinlock_t *ptl; 5925 pmd_t *new = pmd_alloc_one(mm, address); 5926 if (!new) 5927 return -ENOMEM; 5928 5929 ptl = pud_lock(mm, pud); 5930 if (!pud_present(*pud)) { 5931 mm_inc_nr_pmds(mm); 5932 smp_wmb(); /* See comment in pmd_install() */ 5933 pud_populate(mm, pud, new); 5934 } else { /* Another has populated it */ 5935 pmd_free(mm, new); 5936 } 5937 spin_unlock(ptl); 5938 return 0; 5939 } 5940 #endif /* __PAGETABLE_PMD_FOLDED */ 5941 5942 /** 5943 * follow_pte - look up PTE at a user virtual address 5944 * @vma: the memory mapping 5945 * @address: user virtual address 5946 * @ptepp: location to store found PTE 5947 * @ptlp: location to store the lock for the PTE 5948 * 5949 * On a successful return, the pointer to the PTE is stored in @ptepp; 5950 * the corresponding lock is taken and its location is stored in @ptlp. 5951 * 5952 * The contents of the PTE are only stable until @ptlp is released using 5953 * pte_unmap_unlock(). This function will fail if the PTE is non-present. 5954 * Present PTEs may include PTEs that map refcounted pages, such as 5955 * anonymous folios in COW mappings. 5956 * 5957 * Callers must be careful when relying on PTE content after 5958 * pte_unmap_unlock(). Especially if the PTE maps a refcounted page, 5959 * callers must protect against invalidation with MMU notifiers; otherwise 5960 * access to the PFN at a later point in time can trigger use-after-free. 5961 * 5962 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore 5963 * should be taken for read. 5964 * 5965 * This function must not be used to modify PTE content. 5966 * 5967 * Return: zero on success, -ve otherwise. 5968 */ 5969 int follow_pte(struct vm_area_struct *vma, unsigned long address, 5970 pte_t **ptepp, spinlock_t **ptlp) 5971 { 5972 struct mm_struct *mm = vma->vm_mm; 5973 pgd_t *pgd; 5974 p4d_t *p4d; 5975 pud_t *pud; 5976 pmd_t *pmd; 5977 pte_t *ptep; 5978 5979 mmap_assert_locked(mm); 5980 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 5981 goto out; 5982 5983 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5984 goto out; 5985 5986 pgd = pgd_offset(mm, address); 5987 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 5988 goto out; 5989 5990 p4d = p4d_offset(pgd, address); 5991 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) 5992 goto out; 5993 5994 pud = pud_offset(p4d, address); 5995 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 5996 goto out; 5997 5998 pmd = pmd_offset(pud, address); 5999 VM_BUG_ON(pmd_trans_huge(*pmd)); 6000 6001 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 6002 if (!ptep) 6003 goto out; 6004 if (!pte_present(ptep_get(ptep))) 6005 goto unlock; 6006 *ptepp = ptep; 6007 return 0; 6008 unlock: 6009 pte_unmap_unlock(ptep, *ptlp); 6010 out: 6011 return -EINVAL; 6012 } 6013 EXPORT_SYMBOL_GPL(follow_pte); 6014 6015 #ifdef CONFIG_HAVE_IOREMAP_PROT 6016 /** 6017 * generic_access_phys - generic implementation for iomem mmap access 6018 * @vma: the vma to access 6019 * @addr: userspace address, not relative offset within @vma 6020 * @buf: buffer to read/write 6021 * @len: length of transfer 6022 * @write: set to FOLL_WRITE when writing, otherwise reading 6023 * 6024 * This is a generic implementation for &vm_operations_struct.access for an 6025 * iomem mapping. This callback is used by access_process_vm() when the @vma is 6026 * not page based. 
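 *
 * A driver that maps raw PFNs can plug this straight into its vm_ops as
 * its access method; a minimal sketch (the ops name below is illustrative,
 * not something defined in this file):
 *
 *	static const struct vm_operations_struct phys_vm_ops = {
 *		.access = generic_access_phys,
 *	};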
6027 */ 6028 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 6029 void *buf, int len, int write) 6030 { 6031 resource_size_t phys_addr; 6032 unsigned long prot = 0; 6033 void __iomem *maddr; 6034 pte_t *ptep, pte; 6035 spinlock_t *ptl; 6036 int offset = offset_in_page(addr); 6037 int ret = -EINVAL; 6038 6039 retry: 6040 if (follow_pte(vma, addr, &ptep, &ptl)) 6041 return -EINVAL; 6042 pte = ptep_get(ptep); 6043 pte_unmap_unlock(ptep, ptl); 6044 6045 prot = pgprot_val(pte_pgprot(pte)); 6046 phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 6047 6048 if ((write & FOLL_WRITE) && !pte_write(pte)) 6049 return -EINVAL; 6050 6051 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 6052 if (!maddr) 6053 return -ENOMEM; 6054 6055 if (follow_pte(vma, addr, &ptep, &ptl)) 6056 goto out_unmap; 6057 6058 if (!pte_same(pte, ptep_get(ptep))) { 6059 pte_unmap_unlock(ptep, ptl); 6060 iounmap(maddr); 6061 6062 goto retry; 6063 } 6064 6065 if (write) 6066 memcpy_toio(maddr + offset, buf, len); 6067 else 6068 memcpy_fromio(buf, maddr + offset, len); 6069 ret = len; 6070 pte_unmap_unlock(ptep, ptl); 6071 out_unmap: 6072 iounmap(maddr); 6073 6074 return ret; 6075 } 6076 EXPORT_SYMBOL_GPL(generic_access_phys); 6077 #endif 6078 6079 /* 6080 * Access another process' address space as given in mm. 6081 */ 6082 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, 6083 void *buf, int len, unsigned int gup_flags) 6084 { 6085 void *old_buf = buf; 6086 int write = gup_flags & FOLL_WRITE; 6087 6088 if (mmap_read_lock_killable(mm)) 6089 return 0; 6090 6091 /* Untag the address before looking up the VMA */ 6092 addr = untagged_addr_remote(mm, addr); 6093 6094 /* Avoid triggering the temporary warning in __get_user_pages */ 6095 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) 6096 return 0; 6097 6098 /* ignore errors, just check how much was successfully transferred */ 6099 while (len) { 6100 int bytes, offset; 6101 void *maddr; 6102 struct vm_area_struct *vma = NULL; 6103 struct page *page = get_user_page_vma_remote(mm, addr, 6104 gup_flags, &vma); 6105 6106 if (IS_ERR(page)) { 6107 /* We might need to expand the stack to access it */ 6108 vma = vma_lookup(mm, addr); 6109 if (!vma) { 6110 vma = expand_stack(mm, addr); 6111 6112 /* mmap_lock was dropped on failure */ 6113 if (!vma) 6114 return buf - old_buf; 6115 6116 /* Try again if stack expansion worked */ 6117 continue; 6118 } 6119 6120 /* 6121 * Check if this is a VM_IO | VM_PFNMAP VMA, which 6122 * we can access using slightly different code. 
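			 * (For such VMAs, ->access is typically wired to
			 * generic_access_phys() above, which goes through
			 * ioremap rather than kmap.)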
			 */
			bytes = 0;
#ifdef CONFIG_HAVE_IOREMAP_PROT
			if (vma->vm_ops && vma->vm_ops->access)
				bytes = vma->vm_ops->access(vma, addr, buf,
							    len, write);
#endif
			if (bytes <= 0)
				break;
		} else {
			bytes = len;
			offset = addr & (PAGE_SIZE-1);
			if (bytes > PAGE_SIZE-offset)
				bytes = PAGE_SIZE-offset;

			maddr = kmap_local_page(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			unmap_and_put_page(page, maddr);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	mmap_read_unlock(mm);

	return buf - old_buf;
}

/**
 * access_remote_vm - access another process' address space
 * @mm: the mm_struct of the target address space
 * @addr: start address to access
 * @buf: source or destination buffer
 * @len: number of bytes to transfer
 * @gup_flags: flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 *
 * Return: number of bytes copied from source to destination.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		      void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);

	mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * We might be running from an atomic context, so we cannot sleep.
	 */
	if (!mmap_read_trylock(mm))
		return;

	vma = vma_lookup(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		ip -= vma->vm_start;
		ip += vma->vm_pgoff << PAGE_SHIFT;
		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
				vma->vm_start,
				vma->vm_end - vma->vm_start);
	}
	mmap_read_unlock(mm);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	if (pagefault_disabled())
		return;
	__might_sleep(file, line);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation. The target subpage will be processed last to keep its
 * cache lines hot.
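 *
 * For example, with pages_per_huge_page == 8 and the target at subpage 2,
 * the tail half is processed first (7, 6, 5, 4) and the left-right loop
 * then walks 0, 3, 1, 2, finishing on the target.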
6244 */ 6245 static inline int process_huge_page( 6246 unsigned long addr_hint, unsigned int pages_per_huge_page, 6247 int (*process_subpage)(unsigned long addr, int idx, void *arg), 6248 void *arg) 6249 { 6250 int i, n, base, l, ret; 6251 unsigned long addr = addr_hint & 6252 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); 6253 6254 /* Process target subpage last to keep its cache lines hot */ 6255 might_sleep(); 6256 n = (addr_hint - addr) / PAGE_SIZE; 6257 if (2 * n <= pages_per_huge_page) { 6258 /* If target subpage in first half of huge page */ 6259 base = 0; 6260 l = n; 6261 /* Process subpages at the end of huge page */ 6262 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) { 6263 cond_resched(); 6264 ret = process_subpage(addr + i * PAGE_SIZE, i, arg); 6265 if (ret) 6266 return ret; 6267 } 6268 } else { 6269 /* If target subpage in second half of huge page */ 6270 base = pages_per_huge_page - 2 * (pages_per_huge_page - n); 6271 l = pages_per_huge_page - n; 6272 /* Process subpages at the begin of huge page */ 6273 for (i = 0; i < base; i++) { 6274 cond_resched(); 6275 ret = process_subpage(addr + i * PAGE_SIZE, i, arg); 6276 if (ret) 6277 return ret; 6278 } 6279 } 6280 /* 6281 * Process remaining subpages in left-right-left-right pattern 6282 * towards the target subpage 6283 */ 6284 for (i = 0; i < l; i++) { 6285 int left_idx = base + i; 6286 int right_idx = base + 2 * l - 1 - i; 6287 6288 cond_resched(); 6289 ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg); 6290 if (ret) 6291 return ret; 6292 cond_resched(); 6293 ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg); 6294 if (ret) 6295 return ret; 6296 } 6297 return 0; 6298 } 6299 6300 static void clear_gigantic_page(struct page *page, 6301 unsigned long addr, 6302 unsigned int pages_per_huge_page) 6303 { 6304 int i; 6305 struct page *p; 6306 6307 might_sleep(); 6308 for (i = 0; i < pages_per_huge_page; i++) { 6309 p = nth_page(page, i); 6310 cond_resched(); 6311 clear_user_highpage(p, addr + i * PAGE_SIZE); 6312 } 6313 } 6314 6315 static int clear_subpage(unsigned long addr, int idx, void *arg) 6316 { 6317 struct page *page = arg; 6318 6319 clear_user_highpage(nth_page(page, idx), addr); 6320 return 0; 6321 } 6322 6323 void clear_huge_page(struct page *page, 6324 unsigned long addr_hint, unsigned int pages_per_huge_page) 6325 { 6326 unsigned long addr = addr_hint & 6327 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); 6328 6329 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { 6330 clear_gigantic_page(page, addr, pages_per_huge_page); 6331 return; 6332 } 6333 6334 process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page); 6335 } 6336 6337 static int copy_user_gigantic_page(struct folio *dst, struct folio *src, 6338 unsigned long addr, 6339 struct vm_area_struct *vma, 6340 unsigned int pages_per_huge_page) 6341 { 6342 int i; 6343 struct page *dst_page; 6344 struct page *src_page; 6345 6346 for (i = 0; i < pages_per_huge_page; i++) { 6347 dst_page = folio_page(dst, i); 6348 src_page = folio_page(src, i); 6349 6350 cond_resched(); 6351 if (copy_mc_user_highpage(dst_page, src_page, 6352 addr + i*PAGE_SIZE, vma)) { 6353 memory_failure_queue(page_to_pfn(src_page), 0); 6354 return -EHWPOISON; 6355 } 6356 } 6357 return 0; 6358 } 6359 6360 struct copy_subpage_arg { 6361 struct page *dst; 6362 struct page *src; 6363 struct vm_area_struct *vma; 6364 }; 6365 6366 static int copy_subpage(unsigned long addr, int idx, void *arg) 6367 { 6368 struct copy_subpage_arg 
*copy_arg = arg; 6369 struct page *dst = nth_page(copy_arg->dst, idx); 6370 struct page *src = nth_page(copy_arg->src, idx); 6371 6372 if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma)) { 6373 memory_failure_queue(page_to_pfn(src), 0); 6374 return -EHWPOISON; 6375 } 6376 return 0; 6377 } 6378 6379 int copy_user_large_folio(struct folio *dst, struct folio *src, 6380 unsigned long addr_hint, struct vm_area_struct *vma) 6381 { 6382 unsigned int pages_per_huge_page = folio_nr_pages(dst); 6383 unsigned long addr = addr_hint & 6384 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); 6385 struct copy_subpage_arg arg = { 6386 .dst = &dst->page, 6387 .src = &src->page, 6388 .vma = vma, 6389 }; 6390 6391 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) 6392 return copy_user_gigantic_page(dst, src, addr, vma, 6393 pages_per_huge_page); 6394 6395 return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg); 6396 } 6397 6398 long copy_folio_from_user(struct folio *dst_folio, 6399 const void __user *usr_src, 6400 bool allow_pagefault) 6401 { 6402 void *kaddr; 6403 unsigned long i, rc = 0; 6404 unsigned int nr_pages = folio_nr_pages(dst_folio); 6405 unsigned long ret_val = nr_pages * PAGE_SIZE; 6406 struct page *subpage; 6407 6408 for (i = 0; i < nr_pages; i++) { 6409 subpage = folio_page(dst_folio, i); 6410 kaddr = kmap_local_page(subpage); 6411 if (!allow_pagefault) 6412 pagefault_disable(); 6413 rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE); 6414 if (!allow_pagefault) 6415 pagefault_enable(); 6416 kunmap_local(kaddr); 6417 6418 ret_val -= (PAGE_SIZE - rc); 6419 if (rc) 6420 break; 6421 6422 flush_dcache_page(subpage); 6423 6424 cond_resched(); 6425 } 6426 return ret_val; 6427 } 6428 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 6429 6430 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS 6431 6432 static struct kmem_cache *page_ptl_cachep; 6433 6434 void __init ptlock_cache_init(void) 6435 { 6436 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, 6437 SLAB_PANIC, NULL); 6438 } 6439 6440 bool ptlock_alloc(struct ptdesc *ptdesc) 6441 { 6442 spinlock_t *ptl; 6443 6444 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); 6445 if (!ptl) 6446 return false; 6447 ptdesc->ptl = ptl; 6448 return true; 6449 } 6450 6451 void ptlock_free(struct ptdesc *ptdesc) 6452 { 6453 kmem_cache_free(page_ptl_cachep, ptdesc->ptl); 6454 } 6455 #endif 6456 6457 void vma_pgtable_walk_begin(struct vm_area_struct *vma) 6458 { 6459 if (is_vm_hugetlb_page(vma)) 6460 hugetlb_vma_lock_read(vma); 6461 } 6462 6463 void vma_pgtable_walk_end(struct vm_area_struct *vma) 6464 { 6465 if (is_vm_hugetlb_page(vma)) 6466 hugetlb_vma_unlock_read(vma); 6467 } 6468
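
/*
 * Illustrative use of the helpers above (a sketch, not a caller in this
 * file): bracket a page-table walk so that a hugetlb VMA's tables cannot
 * be freed or unshared underneath it:
 *
 *	vma_pgtable_walk_begin(vma);
 *	... walk @vma's page tables ...
 *	vma_pgtable_walk_end(vma);
 */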