// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 *	      Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 *	      Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *	      (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);

/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
	if (!userfaultfd_wp(vmf->vma))
		return false;
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_marker_uffd_wp(vmf->orig_pte);
}

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
	trace_rss_stat(mm, member);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level? Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s? Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top:
	 * comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE
	 * (see pte_free_tlb()), so flush the TLB if we need to.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	struct unlink_vma_file_batch vb;

	do {
		unsigned long addr = vma->vm_start;
		struct vm_area_struct *next;

		/*
		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
		 * be 0. This will underflow and is okay.
		 */
		next = mas_find(mas, ceiling - 1);
		if (unlikely(xa_is_zero(next)))
			next = NULL;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		if (mm_wr_locked)
			vma_start_write(vma);
		unlink_anon_vmas(vma);

		if (is_vm_hugetlb_page(vma)) {
			unlink_file_vma(vma);
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			unlink_file_vma_batch_init(&vb);
			unlink_file_vma_batch_add(&vb, vma);

			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = mas_find(mas, ceiling - 1);
				if (unlikely(xa_is_zero(next)))
					next = NULL;
				if (mm_wr_locked)
					vma_start_write(vma);
				unlink_anon_vmas(vma);
				unlink_file_vma_batch_add(&vb, vma);
			}
			unlink_file_vma_batch_final(&vb);
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	} while (vma);
}
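
/*
 * Populate a pmd entry with a preallocated PTE page table, unless another
 * thread has already done so. On success the table is consumed and *pte is
 * set to NULL so the caller knows not to free it.
 */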
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		/*
		 * Ensure all pte setup (eg. pte page lock and page clearing) are
		 * visible before the pte is made visible to other CPUs by being
		 * put into page tables.
		 *
		 * The other side of the story is the pointer chasing in the page
		 * table walking code (when walking the page table without locking;
		 * ie. most of the time). Fortunately, these data accesses consist
		 * of a chain of data-dependent loads, meaning most CPUs (alpha
		 * being the notable exception) will already guarantee loads are
		 * seen in-order. See the alpha page table accessors for the
		 * smp_rmb() barriers in page table walking code.
		 */
		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
		pmd_populate(mm, pmd, *pte);
		*pte = NULL;
	}
	spin_unlock(ptl);
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		smp_wmb(); /* See comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->read_folio : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The only exception are zeropages, which are
 * *never* refcounted.
 *
 * The disadvantage is that pages are refcounted (which can be slower and
 * simply not an option for some PFNMAP users). The advantage is that we
 * don't have to follow the strict linearity rule of PFNMAP mappings in
 * order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			/*
			 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
			 * and will have refcounts incremented on their struct pages
			 * when they are inserted into PTEs, thus they are safe to
			 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
			 * do not have refcounts. Example of legacy ZONE_DEVICE is
			 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
			 */
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			if (is_zero_pfn(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	VM_WARN_ON_ONCE(is_zero_pfn(pfn));
	return pfn_to_page(pfn);
}

struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	if (page)
		return page_folio(page);
	return NULL;
}

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/* Currently it's only used for huge pfnmaps */
	if (unlikely(pmd_special(pmd)))
		return NULL;

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
				  unsigned long addr, pmd_t pmd)
{
	struct page *page = vm_normal_page_pmd(vma, addr, pmd);

	if (page)
		return page_folio(page);
	return NULL;
}
#endif
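
/*
 * Replace a device-exclusive swap entry with a present pte mapping the
 * original page again. The reference taken when the swap entry was created
 * is kept, so no additional reference is taken here.
 */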
static void restore_exclusive_pte(struct vm_area_struct *vma,
				  struct page *page, unsigned long address,
				  pte_t *ptep)
{
	struct folio *folio = page_folio(page);
	pte_t orig_pte;
	pte_t pte;
	swp_entry_t entry;

	orig_pte = ptep_get(ptep);
	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(orig_pte))
		pte = pte_mksoft_dirty(pte);

	entry = pte_to_swp_entry(orig_pte);
	if (pte_swp_uffd_wp(orig_pte))
		pte = pte_mkuffd_wp(pte);
	else if (is_writable_device_exclusive_entry(entry))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
					   PageAnonExclusive(page)), folio);

	/*
	 * No need to take a page reference as one was already
	 * created when the swap entry was made.
	 */
	if (folio_test_anon(folio))
		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
	else
		/*
		 * Currently device exclusive access only supports anonymous
		 * memory so the entry shouldn't point to a filebacked page.
		 */
		WARN_ON_ONCE(1);

	set_pte_at(vma->vm_mm, address, ptep, pte);

	/*
	 * No need to invalidate - it was non-present before. However
	 * secondary CPUs may have mappings that need invalidating.
	 */
	update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
			  unsigned long addr)
{
	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
	struct page *page = pfn_swap_entry_to_page(entry);

	if (trylock_page(page)) {
		restore_exclusive_pte(vma, page, addr, src_pte);
		unlock_page(page);
		return 0;
	}

	return -EBUSY;
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	unsigned long vm_flags = dst_vma->vm_flags;
	pte_t orig_pte = ptep_get(src_pte);
	pte_t pte = orig_pte;
	struct folio *folio;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		/* Mark the swap entry as shared. */
		if (pte_swp_exclusive(orig_pte)) {
			pte = pte_swp_clear_exclusive(orig_pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		folio = pfn_swap_entry_folio(entry);

		rss[mm_counter(folio)]++;

		if (!is_readable_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both parent and child
			 * to be set to read. A previously exclusive entry is
			 * now shared.
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(orig_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);
		folio = page_folio(page);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		folio_get(folio);
		rss[mm_counter(folio)]++;
		/* Cannot fail as these pages cannot get pinned. */
		folio_try_dup_anon_rmap_pte(folio, page, src_vma);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
		return -ENOENT;
	} else if (is_pte_marker_entry(entry)) {
		pte_marker marker = copy_pte_marker(entry, dst_vma);

		if (marker)
			set_pte_at(dst_mm, addr, dst_pte,
				   make_pte_marker(marker));
		return 0;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct folio **prealloc, struct page *page)
{
	struct folio *new_folio;
	pte_t pte;

	new_folio = *prealloc;
	if (!new_folio)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good! Take it
	 * over and copy the page & arm it.
	 */

	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
		return -EHWPOISON;

	*prealloc = NULL;
	__folio_mark_uptodate(new_folio);
	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
	folio_add_lru_vma(new_folio, dst_vma);
	rss[MM_ANONPAGES]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_mkuffd_wp(pte);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}
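
/*
 * Finish copying nr present PTEs from the parent to the child: write-protect
 * COW mappings in both, clear the dirty bit for shared mappings, mark the
 * child's PTEs old, and drop uffd-wp if the destination doesn't use it.
 */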
static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
		pte_t pte, unsigned long addr, int nr)
{
	struct mm_struct *src_mm = src_vma->vm_mm;

	/* If it's a COW mapping, write-protect it in both processes. */
	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
		wrprotect_ptes(src_mm, addr, src_pte, nr);
		pte = pte_wrprotect(pte);
	}

	/* If it's a shared mapping, mark it clean in the child. */
	if (src_vma->vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
}

/*
 * Copy one present PTE, trying to batch-process subsequent PTEs that map
 * consecutive pages of the same folio by copying them as well.
 *
 * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
 * Otherwise, returns the number of copied PTEs (at least 1).
 */
static inline int
copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
		 int max_nr, int *rss, struct folio **prealloc)
{
	struct page *page;
	struct folio *folio;
	bool any_writable;
	fpb_t flags = 0;
	int err, nr;

	page = vm_normal_page(src_vma, addr, pte);
	if (unlikely(!page))
		goto copy_pte;

	folio = page_folio(page);

	/*
	 * If we likely have to copy, just don't bother with batching. Make
	 * sure that the common "small folio" case is as fast as possible
	 * by keeping the batching logic separate.
	 */
	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
		if (src_vma->vm_flags & VM_SHARED)
			flags |= FPB_IGNORE_DIRTY;
		if (!vma_soft_dirty_enabled(src_vma))
			flags |= FPB_IGNORE_SOFT_DIRTY;

		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
				     &any_writable, NULL, NULL);
		folio_ref_add(folio, nr);
		if (folio_test_anon(folio)) {
			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
								  nr, src_vma))) {
				folio_ref_sub(folio, nr);
				return -EAGAIN;
			}
			rss[MM_ANONPAGES] += nr;
			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
		} else {
			folio_dup_file_rmap_ptes(folio, page, nr);
			rss[mm_counter_file(folio)] += nr;
		}
		if (any_writable)
			pte = pte_mkwrite(pte, src_vma);
		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
				    addr, nr);
		return nr;
	}

	folio_get(folio);
	if (folio_test_anon(folio)) {
		/*
		 * If this page may have been pinned by the parent process,
		 * copy the page immediately for the child so that we'll always
		 * guarantee the pinned page won't be randomly replaced in the
		 * future.
		 */
		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
			/* Page may be pinned, we have to copy. */
			folio_put(folio);
			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
						addr, rss, prealloc, page);
			return err ? err : 1;
		}
		rss[MM_ANONPAGES]++;
		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
	} else {
		folio_dup_file_rmap_pte(folio, page);
		rss[mm_counter_file(folio)]++;
	}

copy_pte:
	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
	return 1;
}

static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
	struct folio *new_folio;

	if (need_zero)
		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
	else
		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);

	if (!new_folio)
		return NULL;

	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		return NULL;
	}
	folio_throttle_swaprate(new_folio, GFP_KERNEL);

	return new_folio;
}
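
/*
 * Copy the PTEs covering one pmd-sized range from src_vma to dst_vma,
 * dropping and retaking the page table locks whenever a page preallocation,
 * a swap-count continuation or a reschedule is needed.
 */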
static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	pmd_t dummy_pmdval;
	pte_t ptent;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, max_nr, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct folio *prealloc = NULL;
	int nr;

again:
	progress = 0;
	init_rss_vec(rss);

	/*
	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
	 * error handling here, assume that exclusive mmap_lock on dst and src
	 * protects anon from unexpected THP transitions; with shmem and file
	 * protected by mmap_lock-less collapse skipping areas with anon_vma
	 * (whereas vma_needs_copy() skips areas without anon_vma). A rework
	 * can remove such assumptions later, but this is good enough for now.
	 */
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We already hold the exclusive mmap_lock, the copy_pte_range() and
	 * retract_page_tables() are using vma->anon_vma to be exclusive, so
	 * the PTE page is stable, and there is no need to get pmdval and do
	 * pmd_same() check.
	 */
	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
					   &src_ptl);
	if (!src_pte) {
		pte_unmap_unlock(dst_pte, dst_ptl);
		/* ret == 0 */
		goto out;
	}
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		nr = 1;

		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		ptent = ptep_get(src_pte);
		if (pte_none(ptent)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(ptent))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
						  dst_pte, src_pte,
						  dst_vma, src_vma,
						  addr, rss);
			if (ret == -EIO) {
				entry = pte_to_swp_entry(ptep_get(src_pte));
				break;
			} else if (ret == -EBUSY) {
				break;
			} else if (!ret) {
				progress += 8;
				continue;
			}
			ptent = ptep_get(src_pte);
			VM_WARN_ON_ONCE(!pte_present(ptent));

			/*
			 * Device exclusive entry restored, continue by copying
			 * the now present pte.
			 */
			WARN_ON_ONCE(ret != -ENOENT);
		}
		/* copy_present_ptes() will clear `*prealloc' if consumed */
		max_nr = (end - addr) / PAGE_SIZE;
		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
					ptent, addr, max_nr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 * If copy failed due to hwpoison in source page, break out.
		 */
		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
			break;
		if (unlikely(prealloc)) {
			/*
			 * The preallocated page must not be reused for the
			 * next address, so as to strictly follow mempolicy
			 * (e.g., alloc_page_vma() allocates the page according
			 * to the address). This can only happen if one pinned
			 * pte changed.
			 */
			folio_put(prealloc);
			prealloc = NULL;
		}
		nr = ret;
		progress += 8 * nr;
	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
		 addr != end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_src_pte, src_ptl);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
		goto out;
	} else if (ret == -EAGAIN) {
		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
		if (!prealloc)
			return -ENOMEM;
	} else if (ret < 0) {
		VM_WARN_ON_ONCE(1);
	}

	/* We've captured and resolved the error. Reset, try again. */
	ret = 0;

	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		folio_put(prealloc);
	return ret;
}

static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
					    addr, dst_vma, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork(). Return
 * false when we can speed up fork() by allowing lazy page faults later until
 * when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	/*
	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, the
	 * pgtable contains uffd-wp protection information that we can't
	 * retrieve from the page cache, and skipping the copy would lose it.
	 */
	if (userfaultfd_wp(dst_vma))
		return true;

	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
		return true;

	if (src_vma->anon_vma)
		return true;

	/*
	 * Don't copy ptes where a page fault will fill them correctly. Fork
	 * becomes much lighter when there are big shared or private readonly
	 * mappings. The tradeoff is that copy_page_range is more efficient
	 * than faulting.
	 */
	return false;
}

int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	bool is_cow;
	int ret;

	if (!vma_needs_copy(dst_vma, src_vma))
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_copy(src_vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * Disabling preemption is not needed for the write side, as
		 * the read side doesn't spin, but goes to the mmap_lock.
		 *
		 * Use the raw variant of the seqcount_t write API to avoid
		 * lockdep complaining about preemptibility.
		 */
		vma_assert_write_locked(src_vma);
		raw_write_seqcount_begin(&src_mm->write_protect_seq);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			untrack_pfn_clear(dst_vma);
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow) {
		raw_write_seqcount_end(&src_mm->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	}
	return ret;
}

/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
	/* By default, zap all pages */
	if (!details || details->reclaim_pt)
		return true;

	/* Or, we zap COWed pages only if the caller wants to */
	return details->even_cows;
}

/* Decides whether we should zap this folio with the folio pointer specified */
static inline bool should_zap_folio(struct zap_details *details,
				    struct folio *folio)
{
	/* If we can make a decision without *folio.. */
	if (should_zap_cows(details))
		return true;

	/* Otherwise we should only zap non-anon folios */
	return !folio_test_anon(folio);
}

static inline bool zap_drop_markers(struct zap_details *details)
{
	if (!details)
		return false;

	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}

/*
 * This function makes sure that we'll replace the none pte with an uffd-wp
 * swap special pte marker when necessary. Must be called with the pgtable
 * lock held.
 *
 * Returns true if uffd-wp ptes were installed, false otherwise.
 */
static inline bool
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte, int nr,
			      struct zap_details *details, pte_t pteval)
{
	bool was_installed = false;

#ifdef CONFIG_PTE_MARKER_UFFD_WP
	/* Zap on anonymous always means dropping everything */
	if (vma_is_anonymous(vma))
		return false;

	if (zap_drop_markers(details))
		return false;

	for (;;) {
		/* the PFN in the PTE is irrelevant. */
		if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
			was_installed = true;
		if (--nr == 0)
			break;
		pte++;
		addr += PAGE_SIZE;
	}
#endif
	return was_installed;
}
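
/*
 * Zap nr present PTEs that all map pages of the same folio. For pagecache
 * folios the PTE dirty and accessed bits are transferred to the folio; rmap
 * removal may be deferred until after the TLB flush (delay_rmap).
 */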
static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, struct folio *folio,
		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
		unsigned long addr, struct zap_details *details, int *rss,
		bool *force_flush, bool *force_break, bool *any_skipped)
{
	struct mm_struct *mm = tlb->mm;
	bool delay_rmap = false;

	if (!folio_test_anon(folio)) {
		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
		if (pte_dirty(ptent)) {
			folio_mark_dirty(folio);
			if (tlb_delay_rmap(tlb)) {
				delay_rmap = true;
				*force_flush = true;
			}
		}
		if (pte_young(ptent) && likely(vma_has_recency(vma)))
			folio_mark_accessed(folio);
		rss[mm_counter(folio)] -= nr;
	} else {
		/* We don't need up-to-date accessed/dirty bits. */
		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
		rss[MM_ANONPAGES] -= nr;
	}
	/* Checking a single PTE in a batch is sufficient. */
	arch_check_zapped_pte(vma, ptent);
	tlb_remove_tlb_entries(tlb, pte, nr, addr);
	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
							     nr, details, ptent);

	if (!delay_rmap) {
		folio_remove_rmap_ptes(folio, page, nr, vma);

		if (unlikely(folio_mapcount(folio) < 0))
			print_bad_pte(vma, addr, ptent, page);
	}
	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
		*force_flush = true;
		*force_break = true;
	}
}

/*
 * Zap or skip at least one present PTE, trying to batch-process subsequent
 * PTEs that map consecutive pages of the same folio.
 *
 * Returns the number of processed (skipped or zapped) PTEs (at least 1).
 */
static inline int zap_present_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
		unsigned int max_nr, unsigned long addr,
		struct zap_details *details, int *rss, bool *force_flush,
		bool *force_break, bool *any_skipped)
{
	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
	struct mm_struct *mm = tlb->mm;
	struct folio *folio;
	struct page *page;
	int nr;

	page = vm_normal_page(vma, addr, ptent);
	if (!page) {
		/* We don't need up-to-date accessed/dirty bits. */
		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
		arch_check_zapped_pte(vma, ptent);
		tlb_remove_tlb_entry(tlb, pte, addr);
		if (userfaultfd_pte_wp(vma, ptent))
			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
						pte, 1, details, ptent);
		ksm_might_unmap_zero_page(mm, ptent);
		return 1;
	}

	folio = page_folio(page);
	if (unlikely(!should_zap_folio(details, folio))) {
		*any_skipped = true;
		return 1;
	}

	/*
	 * Make sure that the common "small folio" case is as fast as possible
	 * by keeping the batching logic separate.
	 */
	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
				     NULL, NULL, NULL);

		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
				       addr, details, rss, force_flush,
				       force_break, any_skipped);
		return nr;
	}
	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
			       details, rss, force_flush, force_break, any_skipped);
	return 1;
}
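
/*
 * Zap or skip up to max_nr non-present PTEs (swap, migration, device and
 * marker entries), returning the number of PTEs processed (at least 1).
 */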
static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
		unsigned int max_nr, unsigned long addr,
		struct zap_details *details, int *rss, bool *any_skipped)
{
	swp_entry_t entry;
	int nr = 1;

	*any_skipped = true;
	entry = pte_to_swp_entry(ptent);
	if (is_device_private_entry(entry) ||
	    is_device_exclusive_entry(entry)) {
		struct page *page = pfn_swap_entry_to_page(entry);
		struct folio *folio = page_folio(page);

		if (unlikely(!should_zap_folio(details, folio)))
			return 1;
		/*
		 * Both device private/exclusive mappings should only
		 * work with anonymous pages so far, so we don't need to
		 * consider the uffd-wp bit when zapping. For more
		 * information, see zap_install_uffd_wp_if_needed().
		 */
		WARN_ON_ONCE(!vma_is_anonymous(vma));
		rss[mm_counter(folio)]--;
		if (is_device_private_entry(entry))
			folio_remove_rmap_pte(folio, page, vma);
		folio_put(folio);
	} else if (!non_swap_entry(entry)) {
		/* Genuine swap entries, hence private anon pages */
		if (!should_zap_cows(details))
			return 1;

		nr = swap_pte_batch(pte, max_nr, ptent);
		rss[MM_SWAPENTS] -= nr;
		free_swap_and_cache_nr(entry, nr);
	} else if (is_migration_entry(entry)) {
		struct folio *folio = pfn_swap_entry_folio(entry);

		if (!should_zap_folio(details, folio))
			return 1;
		rss[mm_counter(folio)]--;
	} else if (pte_marker_entry_uffd_wp(entry)) {
		/*
		 * For anon: always drop the marker; for file: only
		 * drop the marker if explicitly requested.
		 */
		if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
			return 1;
	} else if (is_guard_swp_entry(entry)) {
		/*
		 * Ordinary zapping should not remove guard PTE
		 * markers. Only do so if we should remove PTE markers
		 * in general.
		 */
		if (!zap_drop_markers(details))
			return 1;
	} else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
		if (!should_zap_cows(details))
			return 1;
	} else {
		/* We should have covered all the swap entry types */
		pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
		WARN_ON_ONCE(1);
	}
	clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
	*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);

	return nr;
}

static inline int do_zap_pte_range(struct mmu_gather *tlb,
				   struct vm_area_struct *vma, pte_t *pte,
				   unsigned long addr, unsigned long end,
				   struct zap_details *details, int *rss,
				   bool *force_flush, bool *force_break,
				   bool *any_skipped)
{
	pte_t ptent = ptep_get(pte);
	int max_nr = (end - addr) / PAGE_SIZE;
	int nr = 0;

	/* Skip all consecutive none ptes */
	if (pte_none(ptent)) {
		for (nr = 1; nr < max_nr; nr++) {
			ptent = ptep_get(pte + nr);
			if (!pte_none(ptent))
				break;
		}
		max_nr -= nr;
		if (!max_nr)
			return nr;
		pte += nr;
		addr += nr * PAGE_SIZE;
	}

	if (pte_present(ptent))
		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
				       details, rss, force_flush, force_break,
				       any_skipped);
	else
		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
					  details, rss, any_skipped);

	return nr;
}
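
/*
 * Zap the PTEs covering one pmd-sized range, flushing the TLB and freeing
 * batched pages when forced to, and reclaiming the PTE page table itself
 * when the range was emptied and page table reclaim is enabled.
 */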
static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	bool force_flush = false, force_break = false;
	struct mm_struct *mm = tlb->mm;
	int rss[NR_MM_COUNTERS];
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;
	pmd_t pmdval;
	unsigned long start = addr;
	bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
	bool direct_reclaim = true;
	int nr;

retry:
	tlb_change_page_size(tlb, PAGE_SIZE);
	init_rss_vec(rss);
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return addr;

	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	do {
		bool any_skipped = false;

		if (need_resched()) {
			direct_reclaim = false;
			break;
		}

		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
				      &force_flush, &force_break, &any_skipped);
		if (any_skipped)
			can_reclaim_pt = false;
		if (unlikely(force_break)) {
			addr += nr * PAGE_SIZE;
			direct_reclaim = false;
			break;
		}
	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);

	/*
	 * Fast path: try to hold the pmd lock and unmap the PTE page.
	 *
	 * If the pte lock was released midway (retry case), or if the attempt
	 * to hold the pmd lock failed, then we need to recheck all pte entries
	 * to ensure they are still none, thereby preventing the pte entries
	 * from being repopulated by another thread.
	 */
	if (can_reclaim_pt && direct_reclaim && addr == end)
		direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);

	add_mm_rss_vec(mm, rss);
	arch_leave_lazy_mmu_mode();

	/* Do the actual TLB flush before dropping ptl */
	if (force_flush) {
		tlb_flush_mmu_tlbonly(tlb);
		tlb_flush_rmaps(tlb, vma);
	}
	pte_unmap_unlock(start_pte, ptl);

	/*
	 * If we forced a TLB flush (either due to running out of
	 * batch buffers or because we needed to flush dirty TLB
	 * entries before releasing the ptl), free the batched
	 * memory too. Come back again if we didn't do everything.
	 */
	if (force_flush)
		tlb_flush_mmu(tlb);

	if (addr != end) {
		cond_resched();
		force_flush = false;
		force_break = false;
		goto retry;
	}

	if (can_reclaim_pt) {
		if (direct_reclaim)
			free_pte(mm, start, tlb, pmdval);
		else
			try_to_free_pte(mm, pmd, start, tlb);
	}

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
				addr = next;
				continue;
			}
			/* fall through */
		} else if (details && details->single_folio &&
			   folio_test_pmd_mappable(details->single_folio) &&
			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
			/*
			 * Take and drop THP pmd lock so that we cannot return
			 * prematurely, while zap_huge_pmd() has cleared *pmd,
			 * but not yet decremented compound_mapcount().
			 */
			spin_unlock(ptl);
		}
		if (pmd_none(*pmd)) {
			addr = next;
			continue;
		}
		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
		if (addr != next)
			pmd--;
	} while (pmd++, cond_resched(), addr != end);

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
			if (next - addr != HPAGE_PUD_SIZE) {
				mmap_assert_locked(tlb->mm);
				split_huge_pud(vma, pud, addr);
			} else if (zap_huge_pud(tlb, vma, pud, addr))
				goto next;
			/* fall through */
		}
		if (pud_none_or_clear_bad(pud))
			continue;
		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
next:
		cond_resched();
	} while (pud++, addr = next, addr != end);

	return addr;
}

static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
	} while (p4d++, addr = next, addr != end);

	return addr;
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
}
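
/*
 * Unmap the part of a single vma that overlaps [start_addr, end_addr),
 * dispatching to the hugetlb variant where required.
 */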
1945 details->zap_flags : 0; 1946 __unmap_hugepage_range(tlb, vma, start, end, 1947 NULL, zap_flags); 1948 } 1949 } else 1950 unmap_page_range(tlb, vma, start, end, details); 1951 } 1952 } 1953 1954 /** 1955 * unmap_vmas - unmap a range of memory covered by a list of vma's 1956 * @tlb: address of the caller's struct mmu_gather 1957 * @mas: the maple state 1958 * @vma: the starting vma 1959 * @start_addr: virtual address at which to start unmapping 1960 * @end_addr: virtual address at which to end unmapping 1961 * @tree_end: The maximum index to check 1962 * @mm_wr_locked: lock flag 1963 * 1964 * Unmap all pages in the vma list. 1965 * 1966 * Only addresses between `start' and `end' will be unmapped. 1967 * 1968 * The VMA list must be sorted in ascending virtual address order. 1969 * 1970 * unmap_vmas() assumes that the caller will flush the whole unmapped address 1971 * range after unmap_vmas() returns. So the only responsibility here is to 1972 * ensure that any thus-far unmapped pages are flushed before unmap_vmas() 1973 * drops the lock and schedules. 1974 */ 1975 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, 1976 struct vm_area_struct *vma, unsigned long start_addr, 1977 unsigned long end_addr, unsigned long tree_end, 1978 bool mm_wr_locked) 1979 { 1980 struct mmu_notifier_range range; 1981 struct zap_details details = { 1982 .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP, 1983 /* Careful - we need to zap private pages too! */ 1984 .even_cows = true, 1985 }; 1986 1987 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, 1988 start_addr, end_addr); 1989 mmu_notifier_invalidate_range_start(&range); 1990 do { 1991 unsigned long start = start_addr; 1992 unsigned long end = end_addr; 1993 hugetlb_zap_begin(vma, &start, &end); 1994 unmap_single_vma(tlb, vma, start, end, &details, 1995 mm_wr_locked); 1996 hugetlb_zap_end(vma, &details); 1997 vma = mas_find(mas, tree_end - 1); 1998 } while (vma && likely(!xa_is_zero(vma))); 1999 mmu_notifier_invalidate_range_end(&range); 2000 } 2001 2002 /** 2003 * zap_page_range_single - remove user pages in a given range 2004 * @vma: vm_area_struct holding the applicable pages 2005 * @address: starting address of pages to zap 2006 * @size: number of bytes to zap 2007 * @details: details of shared cache invalidation 2008 * 2009 * The range must fit into one VMA. 2010 */ 2011 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, 2012 unsigned long size, struct zap_details *details) 2013 { 2014 const unsigned long end = address + size; 2015 struct mmu_notifier_range range; 2016 struct mmu_gather tlb; 2017 2018 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 2019 address, end); 2020 hugetlb_zap_begin(vma, &range.start, &range.end); 2021 tlb_gather_mmu(&tlb, vma->vm_mm); 2022 update_hiwater_rss(vma->vm_mm); 2023 mmu_notifier_invalidate_range_start(&range); 2024 /* 2025 * unmap 'address-end' not 'range.start-range.end' as range 2026 * could have been expanded for hugetlb pmd sharing. 2027 */ 2028 unmap_single_vma(&tlb, vma, address, end, details, false); 2029 mmu_notifier_invalidate_range_end(&range); 2030 tlb_finish_mmu(&tlb); 2031 hugetlb_zap_end(vma, details); 2032 } 2033 2034 /** 2035 * zap_vma_ptes - remove ptes mapping the vma 2036 * @vma: vm_area_struct holding ptes to be zapped 2037 * @address: starting address of pages to zap 2038 * @size: number of bytes to zap 2039 * 2040 * This function only unmaps ptes assigned to VM_PFNMAP vmas. 
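 *
 * As an illustrative sketch only (hypothetical driver code, not part of
 * this file): a driver that earlier populated a VM_PFNMAP vma, e.g. with
 * remap_pfn_range(), can drop those ptes again before recycling the
 * backing resource:
 *
 *	static void my_drv_unmap(struct vm_area_struct *vma)
 *	{
 *		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *	}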
2041 * 2042 * The entire address range must be fully contained within the vma. 2043 * 2044 */ 2045 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 2046 unsigned long size) 2047 { 2048 if (!range_in_vma(vma, address, address + size) || 2049 !(vma->vm_flags & VM_PFNMAP)) 2050 return; 2051 2052 zap_page_range_single(vma, address, size, NULL); 2053 } 2054 EXPORT_SYMBOL_GPL(zap_vma_ptes); 2055 2056 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) 2057 { 2058 pgd_t *pgd; 2059 p4d_t *p4d; 2060 pud_t *pud; 2061 pmd_t *pmd; 2062 2063 pgd = pgd_offset(mm, addr); 2064 p4d = p4d_alloc(mm, pgd, addr); 2065 if (!p4d) 2066 return NULL; 2067 pud = pud_alloc(mm, p4d, addr); 2068 if (!pud) 2069 return NULL; 2070 pmd = pmd_alloc(mm, pud, addr); 2071 if (!pmd) 2072 return NULL; 2073 2074 VM_BUG_ON(pmd_trans_huge(*pmd)); 2075 return pmd; 2076 } 2077 2078 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 2079 spinlock_t **ptl) 2080 { 2081 pmd_t *pmd = walk_to_pmd(mm, addr); 2082 2083 if (!pmd) 2084 return NULL; 2085 return pte_alloc_map_lock(mm, pmd, addr, ptl); 2086 } 2087 2088 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma) 2089 { 2090 VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP); 2091 /* 2092 * Whoever wants to forbid the zeropage after some zeropages 2093 * might already have been mapped has to scan the page tables and 2094 * bail out on any zeropages. Zeropages in COW mappings can 2095 * be unshared using FAULT_FLAG_UNSHARE faults. 2096 */ 2097 if (mm_forbids_zeropage(vma->vm_mm)) 2098 return false; 2099 /* zeropages in COW mappings are common and unproblematic. */ 2100 if (is_cow_mapping(vma->vm_flags)) 2101 return true; 2102 /* Mappings that do not allow for writable PTEs are unproblematic. */ 2103 if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) 2104 return true; 2105 /* 2106 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could 2107 * find the shared zeropage and longterm-pin it, which would 2108 * be problematic as soon as the zeropage gets replaced by a different 2109 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would 2110 * now differ to what GUP looked up. FSDAX is incompatible to 2111 * FOLL_LONGTERM and VM_IO is incompatible to GUP completely (see 2112 * check_vma_flags). 2113 */ 2114 return vma->vm_ops && vma->vm_ops->pfn_mkwrite && 2115 (vma_is_fsdax(vma) || vma->vm_flags & VM_IO); 2116 } 2117 2118 static int validate_page_before_insert(struct vm_area_struct *vma, 2119 struct page *page) 2120 { 2121 struct folio *folio = page_folio(page); 2122 2123 if (!folio_ref_count(folio)) 2124 return -EINVAL; 2125 if (unlikely(is_zero_folio(folio))) { 2126 if (!vm_mixed_zeropage_allowed(vma)) 2127 return -EINVAL; 2128 return 0; 2129 } 2130 if (folio_test_anon(folio) || folio_test_slab(folio) || 2131 page_has_type(page)) 2132 return -EINVAL; 2133 flush_dcache_folio(folio); 2134 return 0; 2135 } 2136 2137 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, 2138 unsigned long addr, struct page *page, pgprot_t prot) 2139 { 2140 struct folio *folio = page_folio(page); 2141 pte_t pteval; 2142 2143 if (!pte_none(ptep_get(pte))) 2144 return -EBUSY; 2145 /* Ok, finally just insert the thing.. 
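 * For the shared zeropage we only install a special (refcount-free) pte;
 * for a normal page we take a folio reference, bump the corresponding
 * file/shmem rss counter and add a file rmap entry before writing the pte.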
*/ 2146 pteval = mk_pte(page, prot); 2147 if (unlikely(is_zero_folio(folio))) { 2148 pteval = pte_mkspecial(pteval); 2149 } else { 2150 folio_get(folio); 2151 inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); 2152 folio_add_file_rmap_pte(folio, page, vma); 2153 } 2154 set_pte_at(vma->vm_mm, addr, pte, pteval); 2155 return 0; 2156 } 2157 2158 static int insert_page(struct vm_area_struct *vma, unsigned long addr, 2159 struct page *page, pgprot_t prot) 2160 { 2161 int retval; 2162 pte_t *pte; 2163 spinlock_t *ptl; 2164 2165 retval = validate_page_before_insert(vma, page); 2166 if (retval) 2167 goto out; 2168 retval = -ENOMEM; 2169 pte = get_locked_pte(vma->vm_mm, addr, &ptl); 2170 if (!pte) 2171 goto out; 2172 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); 2173 pte_unmap_unlock(pte, ptl); 2174 out: 2175 return retval; 2176 } 2177 2178 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, 2179 unsigned long addr, struct page *page, pgprot_t prot) 2180 { 2181 int err; 2182 2183 err = validate_page_before_insert(vma, page); 2184 if (err) 2185 return err; 2186 return insert_page_into_pte_locked(vma, pte, addr, page, prot); 2187 } 2188 2189 /* insert_pages() amortizes the cost of spinlock operations 2190 * when inserting pages in a loop. 2191 */ 2192 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, 2193 struct page **pages, unsigned long *num, pgprot_t prot) 2194 { 2195 pmd_t *pmd = NULL; 2196 pte_t *start_pte, *pte; 2197 spinlock_t *pte_lock; 2198 struct mm_struct *const mm = vma->vm_mm; 2199 unsigned long curr_page_idx = 0; 2200 unsigned long remaining_pages_total = *num; 2201 unsigned long pages_to_write_in_pmd; 2202 int ret; 2203 more: 2204 ret = -EFAULT; 2205 pmd = walk_to_pmd(mm, addr); 2206 if (!pmd) 2207 goto out; 2208 2209 pages_to_write_in_pmd = min_t(unsigned long, 2210 remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); 2211 2212 /* Allocate the PTE if necessary; takes PMD lock once only. */ 2213 ret = -ENOMEM; 2214 if (pte_alloc(mm, pmd)) 2215 goto out; 2216 2217 while (pages_to_write_in_pmd) { 2218 int pte_idx = 0; 2219 const int batch_size = min_t(int, pages_to_write_in_pmd, 8); 2220 2221 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); 2222 if (!start_pte) { 2223 ret = -EFAULT; 2224 goto out; 2225 } 2226 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { 2227 int err = insert_page_in_batch_locked(vma, pte, 2228 addr, pages[curr_page_idx], prot); 2229 if (unlikely(err)) { 2230 pte_unmap_unlock(start_pte, pte_lock); 2231 ret = err; 2232 remaining_pages_total -= pte_idx; 2233 goto out; 2234 } 2235 addr += PAGE_SIZE; 2236 ++curr_page_idx; 2237 } 2238 pte_unmap_unlock(start_pte, pte_lock); 2239 pages_to_write_in_pmd -= batch_size; 2240 remaining_pages_total -= batch_size; 2241 } 2242 if (remaining_pages_total) 2243 goto more; 2244 ret = 0; 2245 out: 2246 *num = remaining_pages_total; 2247 return ret; 2248 } 2249 2250 /** 2251 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. 2252 * @vma: user vma to map to 2253 * @addr: target start user address of these pages 2254 * @pages: source kernel pages 2255 * @num: in: number of pages to map. out: number of pages that were *not* 2256 * mapped. (0 means all pages were successfully mapped). 2257 * 2258 * Preferred over vm_insert_page() when inserting multiple pages. 2259 * 2260 * In case of error, we may have mapped a subset of the provided 2261 * pages. It is the caller's responsibility to account for this case. 
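 *
 * A minimal, hedged usage sketch (hypothetical driver code, not part of
 * this file; struct my_drv and its pages[]/npages fields are assumed):
 * batch-mapping already-allocated pages from an f_op->mmap() handler:
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_drv *drv = file->private_data;
 *		unsigned long npages = drv->npages;
 *
 *		return vm_insert_pages(vma, vma->vm_start, drv->pages, &npages);
 *	}
 *
 * On failure, npages reports how many pages were not mapped; returning
 * the error from ->mmap() is sufficient, since the caller then destroys
 * the vma and with it any pages that were already inserted.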
2262 * 2263 * The same restrictions apply as in vm_insert_page(). 2264 */ 2265 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 2266 struct page **pages, unsigned long *num) 2267 { 2268 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; 2269 2270 if (addr < vma->vm_start || end_addr >= vma->vm_end) 2271 return -EFAULT; 2272 if (!(vma->vm_flags & VM_MIXEDMAP)) { 2273 BUG_ON(mmap_read_trylock(vma->vm_mm)); 2274 BUG_ON(vma->vm_flags & VM_PFNMAP); 2275 vm_flags_set(vma, VM_MIXEDMAP); 2276 } 2277 /* Defer page refcount checking till we're about to map that page. */ 2278 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); 2279 } 2280 EXPORT_SYMBOL(vm_insert_pages); 2281 2282 /** 2283 * vm_insert_page - insert single page into user vma 2284 * @vma: user vma to map to 2285 * @addr: target user address of this page 2286 * @page: source kernel page 2287 * 2288 * This allows drivers to insert individual pages they've allocated 2289 * into a user vma. The zeropage is supported in some VMAs, 2290 * see vm_mixed_zeropage_allowed(). 2291 * 2292 * The page has to be a nice clean _individual_ kernel allocation. 2293 * If you allocate a compound page, you need to have marked it as 2294 * such (__GFP_COMP), or manually just split the page up yourself 2295 * (see split_page()). 2296 * 2297 * NOTE! Traditionally this was done with "remap_pfn_range()" which 2298 * took an arbitrary page protection parameter. This doesn't allow 2299 * that. Your vma protection will have to be set up correctly, which 2300 * means that if you want a shared writable mapping, you'd better 2301 * ask for a shared writable mapping! 2302 * 2303 * The page does not need to be reserved. 2304 * 2305 * Usually this function is called from f_op->mmap() handler 2306 * under mm->mmap_lock write-lock, so it can change vma->vm_flags. 2307 * Caller must set VM_MIXEDMAP on vma if it wants to call this 2308 * function from other places, for example from page-fault handler. 2309 * 2310 * Return: %0 on success, negative error code otherwise. 2311 */ 2312 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 2313 struct page *page) 2314 { 2315 if (addr < vma->vm_start || addr >= vma->vm_end) 2316 return -EFAULT; 2317 if (!(vma->vm_flags & VM_MIXEDMAP)) { 2318 BUG_ON(mmap_read_trylock(vma->vm_mm)); 2319 BUG_ON(vma->vm_flags & VM_PFNMAP); 2320 vm_flags_set(vma, VM_MIXEDMAP); 2321 } 2322 return insert_page(vma, addr, page, vma->vm_page_prot); 2323 } 2324 EXPORT_SYMBOL(vm_insert_page); 2325 2326 /* 2327 * __vm_map_pages - maps range of kernel pages into user vma 2328 * @vma: user vma to map to 2329 * @pages: pointer to array of source kernel pages 2330 * @num: number of pages in page array 2331 * @offset: user's requested vm_pgoff 2332 * 2333 * This allows drivers to map range of kernel pages into a user vma. 2334 * The zeropage is supported in some VMAs, see 2335 * vm_mixed_zeropage_allowed(). 2336 * 2337 * Return: 0 on success and error code otherwise. 
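 *
 * A short worked example of the two -ENXIO checks below (numbers are
 * illustrative only): with @num = 8 backing pages, a vma spanning 4
 * pages and @offset = 6, the offset itself is within the object
 * (6 < 8), but count = 4 exceeds num - offset = 2, so the requested
 * window reaches past the end of the object and is rejected.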
2338 */ 2339 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2340 unsigned long num, unsigned long offset) 2341 { 2342 unsigned long count = vma_pages(vma); 2343 unsigned long uaddr = vma->vm_start; 2344 int ret, i; 2345 2346 /* Fail if the user requested offset is beyond the end of the object */ 2347 if (offset >= num) 2348 return -ENXIO; 2349 2350 /* Fail if the user requested size exceeds available object size */ 2351 if (count > num - offset) 2352 return -ENXIO; 2353 2354 for (i = 0; i < count; i++) { 2355 ret = vm_insert_page(vma, uaddr, pages[offset + i]); 2356 if (ret < 0) 2357 return ret; 2358 uaddr += PAGE_SIZE; 2359 } 2360 2361 return 0; 2362 } 2363 2364 /** 2365 * vm_map_pages - maps range of kernel pages starts with non zero offset 2366 * @vma: user vma to map to 2367 * @pages: pointer to array of source kernel pages 2368 * @num: number of pages in page array 2369 * 2370 * Maps an object consisting of @num pages, catering for the user's 2371 * requested vm_pgoff 2372 * 2373 * If we fail to insert any page into the vma, the function will return 2374 * immediately leaving any previously inserted pages present. Callers 2375 * from the mmap handler may immediately return the error as their caller 2376 * will destroy the vma, removing any successfully inserted pages. Other 2377 * callers should make their own arrangements for calling unmap_region(). 2378 * 2379 * Context: Process context. Called by mmap handlers. 2380 * Return: 0 on success and error code otherwise. 2381 */ 2382 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2383 unsigned long num) 2384 { 2385 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); 2386 } 2387 EXPORT_SYMBOL(vm_map_pages); 2388 2389 /** 2390 * vm_map_pages_zero - map range of kernel pages starts with zero offset 2391 * @vma: user vma to map to 2392 * @pages: pointer to array of source kernel pages 2393 * @num: number of pages in page array 2394 * 2395 * Similar to vm_map_pages(), except that it explicitly sets the offset 2396 * to 0. This function is intended for the drivers that did not consider 2397 * vm_pgoff. 2398 * 2399 * Context: Process context. Called by mmap handlers. 2400 * Return: 0 on success and error code otherwise. 2401 */ 2402 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, 2403 unsigned long num) 2404 { 2405 return __vm_map_pages(vma, pages, num, 0); 2406 } 2407 EXPORT_SYMBOL(vm_map_pages_zero); 2408 2409 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2410 pfn_t pfn, pgprot_t prot, bool mkwrite) 2411 { 2412 struct mm_struct *mm = vma->vm_mm; 2413 pte_t *pte, entry; 2414 spinlock_t *ptl; 2415 2416 pte = get_locked_pte(mm, addr, &ptl); 2417 if (!pte) 2418 return VM_FAULT_OOM; 2419 entry = ptep_get(pte); 2420 if (!pte_none(entry)) { 2421 if (mkwrite) { 2422 /* 2423 * For read faults on private mappings the PFN passed 2424 * in may not match the PFN we have mapped if the 2425 * mapped PFN is a writeable COW page. In the mkwrite 2426 * case we are creating a writable PTE for a shared 2427 * mapping and we expect the PFNs to match. If they 2428 * don't match, we are likely racing with block 2429 * allocation and mapping invalidation so just skip the 2430 * update. 
2431 */ 2432 if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) { 2433 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry))); 2434 goto out_unlock; 2435 } 2436 entry = pte_mkyoung(entry); 2437 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2438 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) 2439 update_mmu_cache(vma, addr, pte); 2440 } 2441 goto out_unlock; 2442 } 2443 2444 /* Ok, finally just insert the thing.. */ 2445 if (pfn_t_devmap(pfn)) 2446 entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); 2447 else 2448 entry = pte_mkspecial(pfn_t_pte(pfn, prot)); 2449 2450 if (mkwrite) { 2451 entry = pte_mkyoung(entry); 2452 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2453 } 2454 2455 set_pte_at(mm, addr, pte, entry); 2456 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ 2457 2458 out_unlock: 2459 pte_unmap_unlock(pte, ptl); 2460 return VM_FAULT_NOPAGE; 2461 } 2462 2463 /** 2464 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot 2465 * @vma: user vma to map to 2466 * @addr: target user address of this page 2467 * @pfn: source kernel pfn 2468 * @pgprot: pgprot flags for the inserted page 2469 * 2470 * This is exactly like vmf_insert_pfn(), except that it allows drivers 2471 * to override pgprot on a per-page basis. 2472 * 2473 * This only makes sense for IO mappings, and it makes no sense for 2474 * COW mappings. In general, using multiple vmas is preferable; 2475 * vmf_insert_pfn_prot should only be used if using multiple VMAs is 2476 * impractical. 2477 * 2478 * pgprot typically only differs from @vma->vm_page_prot when drivers set 2479 * caching- and encryption bits different than those of @vma->vm_page_prot, 2480 * because the caching- or encryption mode may not be known at mmap() time. 2481 * 2482 * This is ok as long as @vma->vm_page_prot is not used by the core vm 2483 * to set caching and encryption bits for those vmas (except for COW pages). 2484 * This is ensured by core vm only modifying these page table entries using 2485 * functions that don't touch caching- or encryption bits, using pte_modify() 2486 * if needed. (See for example mprotect()). 2487 * 2488 * Also when new page-table entries are created, this is only done using the 2489 * fault() callback, and never using the value of vma->vm_page_prot, 2490 * except for page-table entries that point to anonymous pages as the result 2491 * of COW. 2492 * 2493 * Context: Process context. May allocate using %GFP_KERNEL. 2494 * Return: vm_fault_t value. 2495 */ 2496 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2497 unsigned long pfn, pgprot_t pgprot) 2498 { 2499 /* 2500 * Technically, architectures with pte_special can avoid all these 2501 * restrictions (same for remap_pfn_range). However we would like 2502 * consistency in testing and feature parity among all, so we should 2503 * try to keep these invariants in place for everybody. 
2504 */ 2505 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 2506 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 2507 (VM_PFNMAP|VM_MIXEDMAP)); 2508 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 2509 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 2510 2511 if (addr < vma->vm_start || addr >= vma->vm_end) 2512 return VM_FAULT_SIGBUS; 2513 2514 if (!pfn_modify_allowed(pfn, pgprot)) 2515 return VM_FAULT_SIGBUS; 2516 2517 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); 2518 2519 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, 2520 false); 2521 } 2522 EXPORT_SYMBOL(vmf_insert_pfn_prot); 2523 2524 /** 2525 * vmf_insert_pfn - insert single pfn into user vma 2526 * @vma: user vma to map to 2527 * @addr: target user address of this page 2528 * @pfn: source kernel pfn 2529 * 2530 * Similar to vm_insert_page, this allows drivers to insert individual pages 2531 * they've allocated into a user vma. Same comments apply. 2532 * 2533 * This function should only be called from a vm_ops->fault handler, and 2534 * in that case the handler should return the result of this function. 2535 * 2536 * vma cannot be a COW mapping. 2537 * 2538 * As this is called only for pages that do not currently exist, we 2539 * do not need to flush old virtual caches or the TLB. 2540 * 2541 * Context: Process context. May allocate using %GFP_KERNEL. 2542 * Return: vm_fault_t value. 2543 */ 2544 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2545 unsigned long pfn) 2546 { 2547 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); 2548 } 2549 EXPORT_SYMBOL(vmf_insert_pfn); 2550 2551 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite) 2552 { 2553 if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) && 2554 (mkwrite || !vm_mixed_zeropage_allowed(vma))) 2555 return false; 2556 /* these checks mirror the abort conditions in vm_normal_page */ 2557 if (vma->vm_flags & VM_MIXEDMAP) 2558 return true; 2559 if (pfn_t_devmap(pfn)) 2560 return true; 2561 if (pfn_t_special(pfn)) 2562 return true; 2563 if (is_zero_pfn(pfn_t_to_pfn(pfn))) 2564 return true; 2565 return false; 2566 } 2567 2568 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, 2569 unsigned long addr, pfn_t pfn, bool mkwrite) 2570 { 2571 pgprot_t pgprot = vma->vm_page_prot; 2572 int err; 2573 2574 if (!vm_mixed_ok(vma, pfn, mkwrite)) 2575 return VM_FAULT_SIGBUS; 2576 2577 if (addr < vma->vm_start || addr >= vma->vm_end) 2578 return VM_FAULT_SIGBUS; 2579 2580 track_pfn_insert(vma, &pgprot, pfn); 2581 2582 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) 2583 return VM_FAULT_SIGBUS; 2584 2585 /* 2586 * If we don't have pte special, then we have to use the pfn_valid() 2587 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 2588 * refcount the page if pfn_valid is true (hence insert_page rather 2589 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP 2590 * without pte special, it would there be refcounted as a normal page. 2591 */ 2592 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && 2593 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { 2594 struct page *page; 2595 2596 /* 2597 * At this point we are committed to insert_page() 2598 * regardless of whether the caller specified flags that 2599 * result in pfn_t_has_page() == false. 
2600 */ 2601 page = pfn_to_page(pfn_t_to_pfn(pfn)); 2602 err = insert_page(vma, addr, page, pgprot); 2603 } else { 2604 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); 2605 } 2606 2607 if (err == -ENOMEM) 2608 return VM_FAULT_OOM; 2609 if (err < 0 && err != -EBUSY) 2610 return VM_FAULT_SIGBUS; 2611 2612 return VM_FAULT_NOPAGE; 2613 } 2614 2615 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2616 pfn_t pfn) 2617 { 2618 return __vm_insert_mixed(vma, addr, pfn, false); 2619 } 2620 EXPORT_SYMBOL(vmf_insert_mixed); 2621 2622 /* 2623 * If the insertion of PTE failed because someone else already added a 2624 * different entry in the mean time, we treat that as success as we assume 2625 * the same entry was actually inserted. 2626 */ 2627 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, 2628 unsigned long addr, pfn_t pfn) 2629 { 2630 return __vm_insert_mixed(vma, addr, pfn, true); 2631 } 2632 2633 /* 2634 * maps a range of physical memory into the requested pages. the old 2635 * mappings are removed. any references to nonexistent pages results 2636 * in null mappings (currently treated as "copy-on-access") 2637 */ 2638 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, 2639 unsigned long addr, unsigned long end, 2640 unsigned long pfn, pgprot_t prot) 2641 { 2642 pte_t *pte, *mapped_pte; 2643 spinlock_t *ptl; 2644 int err = 0; 2645 2646 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 2647 if (!pte) 2648 return -ENOMEM; 2649 arch_enter_lazy_mmu_mode(); 2650 do { 2651 BUG_ON(!pte_none(ptep_get(pte))); 2652 if (!pfn_modify_allowed(pfn, prot)) { 2653 err = -EACCES; 2654 break; 2655 } 2656 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); 2657 pfn++; 2658 } while (pte++, addr += PAGE_SIZE, addr != end); 2659 arch_leave_lazy_mmu_mode(); 2660 pte_unmap_unlock(mapped_pte, ptl); 2661 return err; 2662 } 2663 2664 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, 2665 unsigned long addr, unsigned long end, 2666 unsigned long pfn, pgprot_t prot) 2667 { 2668 pmd_t *pmd; 2669 unsigned long next; 2670 int err; 2671 2672 pfn -= addr >> PAGE_SHIFT; 2673 pmd = pmd_alloc(mm, pud, addr); 2674 if (!pmd) 2675 return -ENOMEM; 2676 VM_BUG_ON(pmd_trans_huge(*pmd)); 2677 do { 2678 next = pmd_addr_end(addr, end); 2679 err = remap_pte_range(mm, pmd, addr, next, 2680 pfn + (addr >> PAGE_SHIFT), prot); 2681 if (err) 2682 return err; 2683 } while (pmd++, addr = next, addr != end); 2684 return 0; 2685 } 2686 2687 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, 2688 unsigned long addr, unsigned long end, 2689 unsigned long pfn, pgprot_t prot) 2690 { 2691 pud_t *pud; 2692 unsigned long next; 2693 int err; 2694 2695 pfn -= addr >> PAGE_SHIFT; 2696 pud = pud_alloc(mm, p4d, addr); 2697 if (!pud) 2698 return -ENOMEM; 2699 do { 2700 next = pud_addr_end(addr, end); 2701 err = remap_pmd_range(mm, pud, addr, next, 2702 pfn + (addr >> PAGE_SHIFT), prot); 2703 if (err) 2704 return err; 2705 } while (pud++, addr = next, addr != end); 2706 return 0; 2707 } 2708 2709 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2710 unsigned long addr, unsigned long end, 2711 unsigned long pfn, pgprot_t prot) 2712 { 2713 p4d_t *p4d; 2714 unsigned long next; 2715 int err; 2716 2717 pfn -= addr >> PAGE_SHIFT; 2718 p4d = p4d_alloc(mm, pgd, addr); 2719 if (!p4d) 2720 return -ENOMEM; 2721 do { 2722 next = p4d_addr_end(addr, end); 2723 err = remap_pud_range(mm, p4d, addr, next, 2724 pfn + (addr >> PAGE_SHIFT), prot); 2725 if 
(err) 2726 return err; 2727 } while (p4d++, addr = next, addr != end); 2728 return 0; 2729 } 2730 2731 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr, 2732 unsigned long pfn, unsigned long size, pgprot_t prot) 2733 { 2734 pgd_t *pgd; 2735 unsigned long next; 2736 unsigned long end = addr + PAGE_ALIGN(size); 2737 struct mm_struct *mm = vma->vm_mm; 2738 int err; 2739 2740 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) 2741 return -EINVAL; 2742 2743 /* 2744 * Physically remapped pages are special. Tell the 2745 * rest of the world about it: 2746 * VM_IO tells people not to look at these pages 2747 * (accesses can have side effects). 2748 * VM_PFNMAP tells the core MM that the base pages are just 2749 * raw PFN mappings, and do not have a "struct page" associated 2750 * with them. 2751 * VM_DONTEXPAND 2752 * Disable vma merging and expanding with mremap(). 2753 * VM_DONTDUMP 2754 * Omit vma from core dump, even when VM_IO turned off. 2755 * 2756 * There's a horrible special case to handle copy-on-write 2757 * behaviour that some programs depend on. We mark the "original" 2758 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2759 * See vm_normal_page() for details. 2760 */ 2761 if (is_cow_mapping(vma->vm_flags)) { 2762 if (addr != vma->vm_start || end != vma->vm_end) 2763 return -EINVAL; 2764 vma->vm_pgoff = pfn; 2765 } 2766 2767 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); 2768 2769 BUG_ON(addr >= end); 2770 pfn -= addr >> PAGE_SHIFT; 2771 pgd = pgd_offset(mm, addr); 2772 flush_cache_range(vma, addr, end); 2773 do { 2774 next = pgd_addr_end(addr, end); 2775 err = remap_p4d_range(mm, pgd, addr, next, 2776 pfn + (addr >> PAGE_SHIFT), prot); 2777 if (err) 2778 return err; 2779 } while (pgd++, addr = next, addr != end); 2780 2781 return 0; 2782 } 2783 2784 /* 2785 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller 2786 * must have pre-validated the caching bits of the pgprot_t. 2787 */ 2788 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, 2789 unsigned long pfn, unsigned long size, pgprot_t prot) 2790 { 2791 int error = remap_pfn_range_internal(vma, addr, pfn, size, prot); 2792 2793 if (!error) 2794 return 0; 2795 2796 /* 2797 * A partial pfn range mapping is dangerous: it does not 2798 * maintain page reference counts, and callers may free 2799 * pages due to the error. So zap it early. 2800 */ 2801 zap_page_range_single(vma, addr, size, NULL); 2802 return error; 2803 } 2804 2805 /** 2806 * remap_pfn_range - remap kernel memory to userspace 2807 * @vma: user vma to map to 2808 * @addr: target page aligned user address to start at 2809 * @pfn: page frame number of kernel physical memory address 2810 * @size: size of mapping area 2811 * @prot: page protection flags for this mapping 2812 * 2813 * Note: this is only safe if the mm semaphore is held when called. 2814 * 2815 * Return: %0 on success, negative error code otherwise. 
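 *
 * A hedged usage sketch (hypothetical driver code, not part of this
 * file; my_dev and its phys_base field are assumed): a typical ->mmap()
 * handler exposing a physical region to user space:
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *		unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vma->vm_pgoff;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *				       vma->vm_page_prot);
 *	}
 *
 * For MMIO regions a driver would normally go through io_remap_pfn_range()
 * or vm_iomap_memory() instead, and may first adjust vma->vm_page_prot
 * (e.g. with pgprot_noncached()).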
2816 */ 2817 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 2818 unsigned long pfn, unsigned long size, pgprot_t prot) 2819 { 2820 int err; 2821 2822 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); 2823 if (err) 2824 return -EINVAL; 2825 2826 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); 2827 if (err) 2828 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); 2829 return err; 2830 } 2831 EXPORT_SYMBOL(remap_pfn_range); 2832 2833 /** 2834 * vm_iomap_memory - remap memory to userspace 2835 * @vma: user vma to map to 2836 * @start: start of the physical memory to be mapped 2837 * @len: size of area 2838 * 2839 * This is a simplified io_remap_pfn_range() for common driver use. The 2840 * driver just needs to give us the physical memory range to be mapped, 2841 * we'll figure out the rest from the vma information. 2842 * 2843 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get 2844 * whatever write-combining details or similar. 2845 * 2846 * Return: %0 on success, negative error code otherwise. 2847 */ 2848 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 2849 { 2850 unsigned long vm_len, pfn, pages; 2851 2852 /* Check that the physical memory area passed in looks valid */ 2853 if (start + len < start) 2854 return -EINVAL; 2855 /* 2856 * You *really* shouldn't map things that aren't page-aligned, 2857 * but we've historically allowed it because IO memory might 2858 * just have smaller alignment. 2859 */ 2860 len += start & ~PAGE_MASK; 2861 pfn = start >> PAGE_SHIFT; 2862 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; 2863 if (pfn + pages < pfn) 2864 return -EINVAL; 2865 2866 /* We start the mapping 'vm_pgoff' pages into the area */ 2867 if (vma->vm_pgoff > pages) 2868 return -EINVAL; 2869 pfn += vma->vm_pgoff; 2870 pages -= vma->vm_pgoff; 2871 2872 /* Can we fit all of the mapping? */ 2873 vm_len = vma->vm_end - vma->vm_start; 2874 if (vm_len >> PAGE_SHIFT > pages) 2875 return -EINVAL; 2876 2877 /* Ok, let it rip */ 2878 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 2879 } 2880 EXPORT_SYMBOL(vm_iomap_memory); 2881 2882 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 2883 unsigned long addr, unsigned long end, 2884 pte_fn_t fn, void *data, bool create, 2885 pgtbl_mod_mask *mask) 2886 { 2887 pte_t *pte, *mapped_pte; 2888 int err = 0; 2889 spinlock_t *ptl; 2890 2891 if (create) { 2892 mapped_pte = pte = (mm == &init_mm) ? 2893 pte_alloc_kernel_track(pmd, addr, mask) : 2894 pte_alloc_map_lock(mm, pmd, addr, &ptl); 2895 if (!pte) 2896 return -ENOMEM; 2897 } else { 2898 mapped_pte = pte = (mm == &init_mm) ? 
2899 pte_offset_kernel(pmd, addr) : 2900 pte_offset_map_lock(mm, pmd, addr, &ptl); 2901 if (!pte) 2902 return -EINVAL; 2903 } 2904 2905 arch_enter_lazy_mmu_mode(); 2906 2907 if (fn) { 2908 do { 2909 if (create || !pte_none(ptep_get(pte))) { 2910 err = fn(pte++, addr, data); 2911 if (err) 2912 break; 2913 } 2914 } while (addr += PAGE_SIZE, addr != end); 2915 } 2916 *mask |= PGTBL_PTE_MODIFIED; 2917 2918 arch_leave_lazy_mmu_mode(); 2919 2920 if (mm != &init_mm) 2921 pte_unmap_unlock(mapped_pte, ptl); 2922 return err; 2923 } 2924 2925 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 2926 unsigned long addr, unsigned long end, 2927 pte_fn_t fn, void *data, bool create, 2928 pgtbl_mod_mask *mask) 2929 { 2930 pmd_t *pmd; 2931 unsigned long next; 2932 int err = 0; 2933 2934 BUG_ON(pud_leaf(*pud)); 2935 2936 if (create) { 2937 pmd = pmd_alloc_track(mm, pud, addr, mask); 2938 if (!pmd) 2939 return -ENOMEM; 2940 } else { 2941 pmd = pmd_offset(pud, addr); 2942 } 2943 do { 2944 next = pmd_addr_end(addr, end); 2945 if (pmd_none(*pmd) && !create) 2946 continue; 2947 if (WARN_ON_ONCE(pmd_leaf(*pmd))) 2948 return -EINVAL; 2949 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { 2950 if (!create) 2951 continue; 2952 pmd_clear_bad(pmd); 2953 } 2954 err = apply_to_pte_range(mm, pmd, addr, next, 2955 fn, data, create, mask); 2956 if (err) 2957 break; 2958 } while (pmd++, addr = next, addr != end); 2959 2960 return err; 2961 } 2962 2963 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, 2964 unsigned long addr, unsigned long end, 2965 pte_fn_t fn, void *data, bool create, 2966 pgtbl_mod_mask *mask) 2967 { 2968 pud_t *pud; 2969 unsigned long next; 2970 int err = 0; 2971 2972 if (create) { 2973 pud = pud_alloc_track(mm, p4d, addr, mask); 2974 if (!pud) 2975 return -ENOMEM; 2976 } else { 2977 pud = pud_offset(p4d, addr); 2978 } 2979 do { 2980 next = pud_addr_end(addr, end); 2981 if (pud_none(*pud) && !create) 2982 continue; 2983 if (WARN_ON_ONCE(pud_leaf(*pud))) 2984 return -EINVAL; 2985 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) { 2986 if (!create) 2987 continue; 2988 pud_clear_bad(pud); 2989 } 2990 err = apply_to_pmd_range(mm, pud, addr, next, 2991 fn, data, create, mask); 2992 if (err) 2993 break; 2994 } while (pud++, addr = next, addr != end); 2995 2996 return err; 2997 } 2998 2999 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, 3000 unsigned long addr, unsigned long end, 3001 pte_fn_t fn, void *data, bool create, 3002 pgtbl_mod_mask *mask) 3003 { 3004 p4d_t *p4d; 3005 unsigned long next; 3006 int err = 0; 3007 3008 if (create) { 3009 p4d = p4d_alloc_track(mm, pgd, addr, mask); 3010 if (!p4d) 3011 return -ENOMEM; 3012 } else { 3013 p4d = p4d_offset(pgd, addr); 3014 } 3015 do { 3016 next = p4d_addr_end(addr, end); 3017 if (p4d_none(*p4d) && !create) 3018 continue; 3019 if (WARN_ON_ONCE(p4d_leaf(*p4d))) 3020 return -EINVAL; 3021 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) { 3022 if (!create) 3023 continue; 3024 p4d_clear_bad(p4d); 3025 } 3026 err = apply_to_pud_range(mm, p4d, addr, next, 3027 fn, data, create, mask); 3028 if (err) 3029 break; 3030 } while (p4d++, addr = next, addr != end); 3031 3032 return err; 3033 } 3034 3035 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, 3036 unsigned long size, pte_fn_t fn, 3037 void *data, bool create) 3038 { 3039 pgd_t *pgd; 3040 unsigned long start = addr, next; 3041 unsigned long end = addr + size; 3042 pgtbl_mod_mask mask = 0; 3043 int err = 0; 3044 3045 if (WARN_ON(addr >= end)) 
3046 return -EINVAL; 3047 3048 pgd = pgd_offset(mm, addr); 3049 do { 3050 next = pgd_addr_end(addr, end); 3051 if (pgd_none(*pgd) && !create) 3052 continue; 3053 if (WARN_ON_ONCE(pgd_leaf(*pgd))) { 3054 err = -EINVAL; 3055 break; 3056 } 3057 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { 3058 if (!create) 3059 continue; 3060 pgd_clear_bad(pgd); 3061 } 3062 err = apply_to_p4d_range(mm, pgd, addr, next, 3063 fn, data, create, &mask); 3064 if (err) 3065 break; 3066 } while (pgd++, addr = next, addr != end); 3067 3068 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 3069 arch_sync_kernel_mappings(start, start + size); 3070 3071 return err; 3072 } 3073 3074 /* 3075 * Scan a region of virtual memory, filling in page tables as necessary 3076 * and calling a provided function on each leaf page table. 3077 */ 3078 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 3079 unsigned long size, pte_fn_t fn, void *data) 3080 { 3081 return __apply_to_page_range(mm, addr, size, fn, data, true); 3082 } 3083 EXPORT_SYMBOL_GPL(apply_to_page_range); 3084 3085 /* 3086 * Scan a region of virtual memory, calling a provided function on 3087 * each leaf page table where it exists. 3088 * 3089 * Unlike apply_to_page_range, this does _not_ fill in page tables 3090 * where they are absent. 3091 */ 3092 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, 3093 unsigned long size, pte_fn_t fn, void *data) 3094 { 3095 return __apply_to_page_range(mm, addr, size, fn, data, false); 3096 } 3097 3098 /* 3099 * handle_pte_fault chooses page fault handler according to an entry which was 3100 * read non-atomically. Before making any commitment, on those architectures 3101 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched 3102 * parts, do_swap_page must check under lock before unmapping the pte and 3103 * proceeding (but do_wp_page is only called after already making such a check; 3104 * and do_anonymous_page can safely check later on). 3105 */ 3106 static inline int pte_unmap_same(struct vm_fault *vmf) 3107 { 3108 int same = 1; 3109 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) 3110 if (sizeof(pte_t) > sizeof(unsigned long)) { 3111 spin_lock(vmf->ptl); 3112 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); 3113 spin_unlock(vmf->ptl); 3114 } 3115 #endif 3116 pte_unmap(vmf->pte); 3117 vmf->pte = NULL; 3118 return same; 3119 } 3120 3121 /* 3122 * Return: 3123 * 0: copied succeeded 3124 * -EHWPOISON: copy failed due to hwpoison in source page 3125 * -EAGAIN: copied failed (some other reason) 3126 */ 3127 static inline int __wp_page_copy_user(struct page *dst, struct page *src, 3128 struct vm_fault *vmf) 3129 { 3130 int ret; 3131 void *kaddr; 3132 void __user *uaddr; 3133 struct vm_area_struct *vma = vmf->vma; 3134 struct mm_struct *mm = vma->vm_mm; 3135 unsigned long addr = vmf->address; 3136 3137 if (likely(src)) { 3138 if (copy_mc_user_highpage(dst, src, addr, vma)) 3139 return -EHWPOISON; 3140 return 0; 3141 } 3142 3143 /* 3144 * If the source page was a PFN mapping, we don't have 3145 * a "struct page" for it. We do a best-effort copy by 3146 * just copying from the original user address. If that 3147 * fails, we just zero-fill it. Live with it. 3148 */ 3149 kaddr = kmap_local_page(dst); 3150 pagefault_disable(); 3151 uaddr = (void __user *)(addr & PAGE_MASK); 3152 3153 /* 3154 * On architectures with software "accessed" bits, we would 3155 * take a double page fault, so mark it accessed here. 
3156 */ 3157 vmf->pte = NULL; 3158 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { 3159 pte_t entry; 3160 3161 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); 3162 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 3163 /* 3164 * Other thread has already handled the fault 3165 * and update local tlb only 3166 */ 3167 if (vmf->pte) 3168 update_mmu_tlb(vma, addr, vmf->pte); 3169 ret = -EAGAIN; 3170 goto pte_unlock; 3171 } 3172 3173 entry = pte_mkyoung(vmf->orig_pte); 3174 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) 3175 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); 3176 } 3177 3178 /* 3179 * This really shouldn't fail, because the page is there 3180 * in the page tables. But it might just be unreadable, 3181 * in which case we just give up and fill the result with 3182 * zeroes. 3183 */ 3184 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 3185 if (vmf->pte) 3186 goto warn; 3187 3188 /* Re-validate under PTL if the page is still mapped */ 3189 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); 3190 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 3191 /* The PTE changed under us, update local tlb */ 3192 if (vmf->pte) 3193 update_mmu_tlb(vma, addr, vmf->pte); 3194 ret = -EAGAIN; 3195 goto pte_unlock; 3196 } 3197 3198 /* 3199 * The same page can be mapped back since last copy attempt. 3200 * Try to copy again under PTL. 3201 */ 3202 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 3203 /* 3204 * Give a warn in case there can be some obscure 3205 * use-case 3206 */ 3207 warn: 3208 WARN_ON_ONCE(1); 3209 clear_page(kaddr); 3210 } 3211 } 3212 3213 ret = 0; 3214 3215 pte_unlock: 3216 if (vmf->pte) 3217 pte_unmap_unlock(vmf->pte, vmf->ptl); 3218 pagefault_enable(); 3219 kunmap_local(kaddr); 3220 flush_dcache_page(dst); 3221 3222 return ret; 3223 } 3224 3225 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) 3226 { 3227 struct file *vm_file = vma->vm_file; 3228 3229 if (vm_file) 3230 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; 3231 3232 /* 3233 * Special mappings (e.g. VDSO) do not have any file so fake 3234 * a default GFP_KERNEL for them. 3235 */ 3236 return GFP_KERNEL; 3237 } 3238 3239 /* 3240 * Notify the address space that the page is about to become writable so that 3241 * it can prohibit this or wait for the page to get into an appropriate state. 3242 * 3243 * We do this without the lock held, so that it can sleep if it needs to. 3244 */ 3245 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) 3246 { 3247 vm_fault_t ret; 3248 unsigned int old_flags = vmf->flags; 3249 3250 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 3251 3252 if (vmf->vma->vm_file && 3253 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) 3254 return VM_FAULT_SIGBUS; 3255 3256 ret = vmf->vma->vm_ops->page_mkwrite(vmf); 3257 /* Restore original flags so that caller is not surprised */ 3258 vmf->flags = old_flags; 3259 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 3260 return ret; 3261 if (unlikely(!(ret & VM_FAULT_LOCKED))) { 3262 folio_lock(folio); 3263 if (!folio->mapping) { 3264 folio_unlock(folio); 3265 return 0; /* retry */ 3266 } 3267 ret |= VM_FAULT_LOCKED; 3268 } else 3269 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 3270 return ret; 3271 } 3272 3273 /* 3274 * Handle dirtying of a page in shared file mapping on a write fault. 3275 * 3276 * The function expects the page to be locked and unlocks it. 
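 * If it drops the mmap_lock to throttle writeback (see
 * maybe_unlock_mmap_for_io()), it returns VM_FAULT_COMPLETED so the
 * caller knows the lock is no longer held.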
3277 */ 3278 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) 3279 { 3280 struct vm_area_struct *vma = vmf->vma; 3281 struct address_space *mapping; 3282 struct folio *folio = page_folio(vmf->page); 3283 bool dirtied; 3284 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; 3285 3286 dirtied = folio_mark_dirty(folio); 3287 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio); 3288 /* 3289 * Take a local copy of the address_space - folio.mapping may be zeroed 3290 * by truncate after folio_unlock(). The address_space itself remains 3291 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s 3292 * release semantics to prevent the compiler from undoing this copying. 3293 */ 3294 mapping = folio_raw_mapping(folio); 3295 folio_unlock(folio); 3296 3297 if (!page_mkwrite) 3298 file_update_time(vma->vm_file); 3299 3300 /* 3301 * Throttle page dirtying rate down to writeback speed. 3302 * 3303 * mapping may be NULL here because some device drivers do not 3304 * set page.mapping but still dirty their pages 3305 * 3306 * Drop the mmap_lock before waiting on IO, if we can. The file 3307 * is pinning the mapping, as per above. 3308 */ 3309 if ((dirtied || page_mkwrite) && mapping) { 3310 struct file *fpin; 3311 3312 fpin = maybe_unlock_mmap_for_io(vmf, NULL); 3313 balance_dirty_pages_ratelimited(mapping); 3314 if (fpin) { 3315 fput(fpin); 3316 return VM_FAULT_COMPLETED; 3317 } 3318 } 3319 3320 return 0; 3321 } 3322 3323 /* 3324 * Handle write page faults for pages that can be reused in the current vma 3325 * 3326 * This can happen either due to the mapping being with the VM_SHARED flag, 3327 * or due to us being the last reference standing to the page. In either 3328 * case, all we need to do here is to mark the page as writable and update 3329 * any related book-keeping. 3330 */ 3331 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) 3332 __releases(vmf->ptl) 3333 { 3334 struct vm_area_struct *vma = vmf->vma; 3335 pte_t entry; 3336 3337 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); 3338 VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte))); 3339 3340 if (folio) { 3341 VM_BUG_ON(folio_test_anon(folio) && 3342 !PageAnonExclusive(vmf->page)); 3343 /* 3344 * Clear the folio's cpupid information as the existing 3345 * information potentially belongs to a now completely 3346 * unrelated process. 3347 */ 3348 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); 3349 } 3350 3351 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3352 entry = pte_mkyoung(vmf->orig_pte); 3353 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3354 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) 3355 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 3356 pte_unmap_unlock(vmf->pte, vmf->ptl); 3357 count_vm_event(PGREUSE); 3358 } 3359 3360 /* 3361 * We could add a bitflag somewhere, but for now, we know that all 3362 * vm_ops that have a ->map_pages have been audited and don't need 3363 * the mmap_lock to be held. 3364 */ 3365 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) 3366 { 3367 struct vm_area_struct *vma = vmf->vma; 3368 3369 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) 3370 return 0; 3371 vma_end_read(vma); 3372 return VM_FAULT_RETRY; 3373 } 3374 3375 /** 3376 * __vmf_anon_prepare - Prepare to handle an anonymous fault. 3377 * @vmf: The vm_fault descriptor passed from the fault handler. 
3378 * 3379 * When preparing to insert an anonymous page into a VMA from a 3380 * fault handler, call this function rather than anon_vma_prepare(). 3381 * If this vma does not already have an associated anon_vma and we are 3382 * only protected by the per-VMA lock, the caller must retry with the 3383 * mmap_lock held. __anon_vma_prepare() will look at adjacent VMAs to 3384 * determine if this VMA can share its anon_vma, and that's not safe to 3385 * do with only the per-VMA lock held for this VMA. 3386 * 3387 * Return: 0 if fault handling can proceed. Any other value should be 3388 * returned to the caller. 3389 */ 3390 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf) 3391 { 3392 struct vm_area_struct *vma = vmf->vma; 3393 vm_fault_t ret = 0; 3394 3395 if (likely(vma->anon_vma)) 3396 return 0; 3397 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { 3398 if (!mmap_read_trylock(vma->vm_mm)) 3399 return VM_FAULT_RETRY; 3400 } 3401 if (__anon_vma_prepare(vma)) 3402 ret = VM_FAULT_OOM; 3403 if (vmf->flags & FAULT_FLAG_VMA_LOCK) 3404 mmap_read_unlock(vma->vm_mm); 3405 return ret; 3406 } 3407 3408 /* 3409 * Handle the case of a page which we actually need to copy to a new page, 3410 * either due to COW or unsharing. 3411 * 3412 * Called with mmap_lock locked and the old page referenced, but 3413 * without the ptl held. 3414 * 3415 * High level logic flow: 3416 * 3417 * - Allocate a page, copy the content of the old page to the new one. 3418 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. 3419 * - Take the PTL. If the pte changed, bail out and release the allocated page 3420 * - If the pte is still the way we remember it, update the page table and all 3421 * relevant references. This includes dropping the reference the page-table 3422 * held to the old page, as well as updating the rmap. 3423 * - In any case, unlock the PTL and drop the reference we took to the old page. 3424 */ 3425 static vm_fault_t wp_page_copy(struct vm_fault *vmf) 3426 { 3427 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3428 struct vm_area_struct *vma = vmf->vma; 3429 struct mm_struct *mm = vma->vm_mm; 3430 struct folio *old_folio = NULL; 3431 struct folio *new_folio = NULL; 3432 pte_t entry; 3433 int page_copied = 0; 3434 struct mmu_notifier_range range; 3435 vm_fault_t ret; 3436 bool pfn_is_zero; 3437 3438 delayacct_wpcopy_start(); 3439 3440 if (vmf->page) 3441 old_folio = page_folio(vmf->page); 3442 ret = vmf_anon_prepare(vmf); 3443 if (unlikely(ret)) 3444 goto out; 3445 3446 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); 3447 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); 3448 if (!new_folio) 3449 goto oom; 3450 3451 if (!pfn_is_zero) { 3452 int err; 3453 3454 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); 3455 if (err) { 3456 /* 3457 * COW failed, if the fault was solved by other, 3458 * it's fine. If not, userspace would re-fault on 3459 * the same address and we will handle the fault 3460 * from the second attempt. 3461 * The -EHWPOISON case will not be retried. 3462 */ 3463 folio_put(new_folio); 3464 if (old_folio) 3465 folio_put(old_folio); 3466 3467 delayacct_wpcopy_end(); 3468 return err == -EHWPOISON ? 
VM_FAULT_HWPOISON : 0; 3469 } 3470 kmsan_copy_page_meta(&new_folio->page, vmf->page); 3471 } 3472 3473 __folio_mark_uptodate(new_folio); 3474 3475 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 3476 vmf->address & PAGE_MASK, 3477 (vmf->address & PAGE_MASK) + PAGE_SIZE); 3478 mmu_notifier_invalidate_range_start(&range); 3479 3480 /* 3481 * Re-check the pte - we dropped the lock 3482 */ 3483 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); 3484 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 3485 if (old_folio) { 3486 if (!folio_test_anon(old_folio)) { 3487 dec_mm_counter(mm, mm_counter_file(old_folio)); 3488 inc_mm_counter(mm, MM_ANONPAGES); 3489 } 3490 } else { 3491 ksm_might_unmap_zero_page(mm, vmf->orig_pte); 3492 inc_mm_counter(mm, MM_ANONPAGES); 3493 } 3494 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3495 entry = mk_pte(&new_folio->page, vma->vm_page_prot); 3496 entry = pte_sw_mkyoung(entry); 3497 if (unlikely(unshare)) { 3498 if (pte_soft_dirty(vmf->orig_pte)) 3499 entry = pte_mksoft_dirty(entry); 3500 if (pte_uffd_wp(vmf->orig_pte)) 3501 entry = pte_mkuffd_wp(entry); 3502 } else { 3503 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3504 } 3505 3506 /* 3507 * Clear the pte entry and flush it first, before updating the 3508 * pte with the new entry, to keep TLBs on different CPUs in 3509 * sync. This code used to set the new PTE then flush TLBs, but 3510 * that left a window where the new PTE could be loaded into 3511 * some TLBs while the old PTE remains in others. 3512 */ 3513 ptep_clear_flush(vma, vmf->address, vmf->pte); 3514 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); 3515 folio_add_lru_vma(new_folio, vma); 3516 BUG_ON(unshare && pte_write(entry)); 3517 set_pte_at(mm, vmf->address, vmf->pte, entry); 3518 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 3519 if (old_folio) { 3520 /* 3521 * Only after switching the pte to the new page may 3522 * we remove the mapcount here. Otherwise another 3523 * process may come and find the rmap count decremented 3524 * before the pte is switched to the new page, and 3525 * "reuse" the old page writing into it while our pte 3526 * here still points into it and can be read by other 3527 * threads. 3528 * 3529 * The critical issue is to order this 3530 * folio_remove_rmap_pte() with the ptp_clear_flush 3531 * above. Those stores are ordered by (if nothing else,) 3532 * the barrier present in the atomic_add_negative 3533 * in folio_remove_rmap_pte(); 3534 * 3535 * Then the TLB flush in ptep_clear_flush ensures that 3536 * no process can access the old page before the 3537 * decremented mapcount is visible. And the old page 3538 * cannot be reused until after the decremented 3539 * mapcount is visible. So transitively, TLBs to 3540 * old page will be flushed before it can be reused. 3541 */ 3542 folio_remove_rmap_pte(old_folio, vmf->page, vma); 3543 } 3544 3545 /* Free the old page.. 
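 * By aliasing new_folio to the old folio, the folio_put(new_folio)
 * further down releases the reference that backed the pte mapping we
 * just removed, while the later folio_put(old_folio) drops the
 * reference taken in do_wp_page(). The new folio keeps its reference
 * through the pte installed above.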
*/ 3546 new_folio = old_folio; 3547 page_copied = 1; 3548 pte_unmap_unlock(vmf->pte, vmf->ptl); 3549 } else if (vmf->pte) { 3550 update_mmu_tlb(vma, vmf->address, vmf->pte); 3551 pte_unmap_unlock(vmf->pte, vmf->ptl); 3552 } 3553 3554 mmu_notifier_invalidate_range_end(&range); 3555 3556 if (new_folio) 3557 folio_put(new_folio); 3558 if (old_folio) { 3559 if (page_copied) 3560 free_swap_cache(old_folio); 3561 folio_put(old_folio); 3562 } 3563 3564 delayacct_wpcopy_end(); 3565 return 0; 3566 oom: 3567 ret = VM_FAULT_OOM; 3568 out: 3569 if (old_folio) 3570 folio_put(old_folio); 3571 3572 delayacct_wpcopy_end(); 3573 return ret; 3574 } 3575 3576 /** 3577 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE 3578 * writeable once the page is prepared 3579 * 3580 * @vmf: structure describing the fault 3581 * @folio: the folio of vmf->page 3582 * 3583 * This function handles all that is needed to finish a write page fault in a 3584 * shared mapping due to PTE being read-only once the mapped page is prepared. 3585 * It handles locking of PTE and modifying it. 3586 * 3587 * The function expects the page to be locked or other protection against 3588 * concurrent faults / writeback (such as DAX radix tree locks). 3589 * 3590 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before 3591 * we acquired PTE lock. 3592 */ 3593 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) 3594 { 3595 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); 3596 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, 3597 &vmf->ptl); 3598 if (!vmf->pte) 3599 return VM_FAULT_NOPAGE; 3600 /* 3601 * We might have raced with another page fault while we released the 3602 * pte_offset_map_lock. 3603 */ 3604 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { 3605 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 3606 pte_unmap_unlock(vmf->pte, vmf->ptl); 3607 return VM_FAULT_NOPAGE; 3608 } 3609 wp_page_reuse(vmf, folio); 3610 return 0; 3611 } 3612 3613 /* 3614 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 3615 * mapping 3616 */ 3617 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) 3618 { 3619 struct vm_area_struct *vma = vmf->vma; 3620 3621 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { 3622 vm_fault_t ret; 3623 3624 pte_unmap_unlock(vmf->pte, vmf->ptl); 3625 ret = vmf_can_call_fault(vmf); 3626 if (ret) 3627 return ret; 3628 3629 vmf->flags |= FAULT_FLAG_MKWRITE; 3630 ret = vma->vm_ops->pfn_mkwrite(vmf); 3631 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) 3632 return ret; 3633 return finish_mkwrite_fault(vmf, NULL); 3634 } 3635 wp_page_reuse(vmf, NULL); 3636 return 0; 3637 } 3638 3639 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) 3640 __releases(vmf->ptl) 3641 { 3642 struct vm_area_struct *vma = vmf->vma; 3643 vm_fault_t ret = 0; 3644 3645 folio_get(folio); 3646 3647 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 3648 vm_fault_t tmp; 3649 3650 pte_unmap_unlock(vmf->pte, vmf->ptl); 3651 tmp = vmf_can_call_fault(vmf); 3652 if (tmp) { 3653 folio_put(folio); 3654 return tmp; 3655 } 3656 3657 tmp = do_page_mkwrite(vmf, folio); 3658 if (unlikely(!tmp || (tmp & 3659 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3660 folio_put(folio); 3661 return tmp; 3662 } 3663 tmp = finish_mkwrite_fault(vmf, folio); 3664 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 3665 folio_unlock(folio); 3666 folio_put(folio); 3667 return tmp; 3668 } 3669 } else { 3670 wp_page_reuse(vmf, folio); 3671 folio_lock(folio); 
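		/*
		 * fault_dirty_shared_page() below expects a locked folio and
		 * will unlock it; on this reuse path no ->page_mkwrite()
		 * handler has locked it for us, so take the lock here.
		 */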
3672 } 3673 ret |= fault_dirty_shared_page(vmf); 3674 folio_put(folio); 3675 3676 return ret; 3677 } 3678 3679 static bool wp_can_reuse_anon_folio(struct folio *folio, 3680 struct vm_area_struct *vma) 3681 { 3682 /* 3683 * We could currently only reuse a subpage of a large folio if no 3684 * other subpages of the large folios are still mapped. However, 3685 * let's just consistently not reuse subpages even if we could 3686 * reuse in that scenario, and give back a large folio a bit 3687 * sooner. 3688 */ 3689 if (folio_test_large(folio)) 3690 return false; 3691 3692 /* 3693 * We have to verify under folio lock: these early checks are 3694 * just an optimization to avoid locking the folio and freeing 3695 * the swapcache if there is little hope that we can reuse. 3696 * 3697 * KSM doesn't necessarily raise the folio refcount. 3698 */ 3699 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) 3700 return false; 3701 if (!folio_test_lru(folio)) 3702 /* 3703 * We cannot easily detect+handle references from 3704 * remote LRU caches or references to LRU folios. 3705 */ 3706 lru_add_drain(); 3707 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) 3708 return false; 3709 if (!folio_trylock(folio)) 3710 return false; 3711 if (folio_test_swapcache(folio)) 3712 folio_free_swap(folio); 3713 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { 3714 folio_unlock(folio); 3715 return false; 3716 } 3717 /* 3718 * Ok, we've got the only folio reference from our mapping 3719 * and the folio is locked, it's dark out, and we're wearing 3720 * sunglasses. Hit it. 3721 */ 3722 folio_move_anon_rmap(folio, vma); 3723 folio_unlock(folio); 3724 return true; 3725 } 3726 3727 /* 3728 * This routine handles present pages, when 3729 * * users try to write to a shared page (FAULT_FLAG_WRITE) 3730 * * GUP wants to take a R/O pin on a possibly shared anonymous page 3731 * (FAULT_FLAG_UNSHARE) 3732 * 3733 * It is done by copying the page to a new address and decrementing the 3734 * shared-page counter for the old page. 3735 * 3736 * Note that this routine assumes that the protection checks have been 3737 * done by the caller (the low-level page fault routine in most cases). 3738 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've 3739 * done any necessary COW. 3740 * 3741 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even 3742 * though the page will change only once the write actually happens. This 3743 * avoids a few races, and potentially makes it more efficient. 3744 * 3745 * We enter with non-exclusive mmap_lock (to exclude vma changes, 3746 * but allow concurrent faults), with pte both mapped and locked. 3747 * We return with mmap_lock still held, but pte unmapped and unlocked. 3748 */ 3749 static vm_fault_t do_wp_page(struct vm_fault *vmf) 3750 __releases(vmf->ptl) 3751 { 3752 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3753 struct vm_area_struct *vma = vmf->vma; 3754 struct folio *folio = NULL; 3755 pte_t pte; 3756 3757 if (likely(!unshare)) { 3758 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { 3759 if (!userfaultfd_wp_async(vma)) { 3760 pte_unmap_unlock(vmf->pte, vmf->ptl); 3761 return handle_userfault(vmf, VM_UFFD_WP); 3762 } 3763 3764 /* 3765 * Nothing needed (cache flush, TLB invalidations, 3766 * etc.) because we're only removing the uffd-wp bit, 3767 * which is completely invisible to the user. 
3768 */ 3769 pte = pte_clear_uffd_wp(ptep_get(vmf->pte)); 3770 3771 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 3772 /* 3773 * Update this to be prepared for following up CoW 3774 * handling 3775 */ 3776 vmf->orig_pte = pte; 3777 } 3778 3779 /* 3780 * Userfaultfd write-protect can defer flushes. Ensure the TLB 3781 * is flushed in this case before copying. 3782 */ 3783 if (unlikely(userfaultfd_wp(vmf->vma) && 3784 mm_tlb_flush_pending(vmf->vma->vm_mm))) 3785 flush_tlb_page(vmf->vma, vmf->address); 3786 } 3787 3788 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); 3789 3790 if (vmf->page) 3791 folio = page_folio(vmf->page); 3792 3793 /* 3794 * Shared mapping: we are guaranteed to have VM_WRITE and 3795 * FAULT_FLAG_WRITE set at this point. 3796 */ 3797 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 3798 /* 3799 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a 3800 * VM_PFNMAP VMA. 3801 * 3802 * We should not cow pages in a shared writeable mapping. 3803 * Just mark the pages writable and/or call ops->pfn_mkwrite. 3804 */ 3805 if (!vmf->page) 3806 return wp_pfn_shared(vmf); 3807 return wp_page_shared(vmf, folio); 3808 } 3809 3810 /* 3811 * Private mapping: create an exclusive anonymous page copy if reuse 3812 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling. 3813 * 3814 * If we encounter a page that is marked exclusive, we must reuse 3815 * the page without further checks. 3816 */ 3817 if (folio && folio_test_anon(folio) && 3818 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { 3819 if (!PageAnonExclusive(vmf->page)) 3820 SetPageAnonExclusive(vmf->page); 3821 if (unlikely(unshare)) { 3822 pte_unmap_unlock(vmf->pte, vmf->ptl); 3823 return 0; 3824 } 3825 wp_page_reuse(vmf, folio); 3826 return 0; 3827 } 3828 /* 3829 * Ok, we need to copy. Oh, well.. 3830 */ 3831 if (folio) 3832 folio_get(folio); 3833 3834 pte_unmap_unlock(vmf->pte, vmf->ptl); 3835 #ifdef CONFIG_KSM 3836 if (folio && folio_test_ksm(folio)) 3837 count_vm_event(COW_KSM); 3838 #endif 3839 return wp_page_copy(vmf); 3840 } 3841 3842 static void unmap_mapping_range_vma(struct vm_area_struct *vma, 3843 unsigned long start_addr, unsigned long end_addr, 3844 struct zap_details *details) 3845 { 3846 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); 3847 } 3848 3849 static inline void unmap_mapping_range_tree(struct rb_root_cached *root, 3850 pgoff_t first_index, 3851 pgoff_t last_index, 3852 struct zap_details *details) 3853 { 3854 struct vm_area_struct *vma; 3855 pgoff_t vba, vea, zba, zea; 3856 3857 vma_interval_tree_foreach(vma, root, first_index, last_index) { 3858 vba = vma->vm_pgoff; 3859 vea = vba + vma_pages(vma) - 1; 3860 zba = max(first_index, vba); 3861 zea = min(last_index, vea); 3862 3863 unmap_mapping_range_vma(vma, 3864 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 3865 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 3866 details); 3867 } 3868 } 3869 3870 /** 3871 * unmap_mapping_folio() - Unmap single folio from processes. 3872 * @folio: The locked folio to be unmapped. 3873 * 3874 * Unmap this folio from any userspace process which still has it mmaped. 3875 * Typically, for efficiency, the range of nearby pages has already been 3876 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once 3877 * truncation or invalidation holds the lock on a folio, it may find that 3878 * the page has been remapped again: and then uses unmap_mapping_folio() 3879 * to unmap it finally. 
3880 */ 3881 void unmap_mapping_folio(struct folio *folio) 3882 { 3883 struct address_space *mapping = folio->mapping; 3884 struct zap_details details = { }; 3885 pgoff_t first_index; 3886 pgoff_t last_index; 3887 3888 VM_BUG_ON(!folio_test_locked(folio)); 3889 3890 first_index = folio->index; 3891 last_index = folio_next_index(folio) - 1; 3892 3893 details.even_cows = false; 3894 details.single_folio = folio; 3895 details.zap_flags = ZAP_FLAG_DROP_MARKER; 3896 3897 i_mmap_lock_read(mapping); 3898 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3899 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3900 last_index, &details); 3901 i_mmap_unlock_read(mapping); 3902 } 3903 3904 /** 3905 * unmap_mapping_pages() - Unmap pages from processes. 3906 * @mapping: The address space containing pages to be unmapped. 3907 * @start: Index of first page to be unmapped. 3908 * @nr: Number of pages to be unmapped. 0 to unmap to end of file. 3909 * @even_cows: Whether to unmap even private COWed pages. 3910 * 3911 * Unmap the pages in this address space from any userspace process which 3912 * has them mmaped. Generally, you want to remove COWed pages as well when 3913 * a file is being truncated, but not when invalidating pages from the page 3914 * cache. 3915 */ 3916 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, 3917 pgoff_t nr, bool even_cows) 3918 { 3919 struct zap_details details = { }; 3920 pgoff_t first_index = start; 3921 pgoff_t last_index = start + nr - 1; 3922 3923 details.even_cows = even_cows; 3924 if (last_index < first_index) 3925 last_index = ULONG_MAX; 3926 3927 i_mmap_lock_read(mapping); 3928 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3929 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3930 last_index, &details); 3931 i_mmap_unlock_read(mapping); 3932 } 3933 EXPORT_SYMBOL_GPL(unmap_mapping_pages); 3934 3935 /** 3936 * unmap_mapping_range - unmap the portion of all mmaps in the specified 3937 * address_space corresponding to the specified byte range in the underlying 3938 * file. 3939 * 3940 * @mapping: the address space containing mmaps to be unmapped. 3941 * @holebegin: byte in first page to unmap, relative to the start of 3942 * the underlying file. This will be rounded down to a PAGE_SIZE 3943 * boundary. Note that this is different from truncate_pagecache(), which 3944 * must keep the partial page. In contrast, we must get rid of 3945 * partial pages. 3946 * @holelen: size of prospective hole in bytes. This will be rounded 3947 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 3948 * end of the file. 3949 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 3950 * but 0 when invalidating pagecache, don't throw away private data. 3951 */ 3952 void unmap_mapping_range(struct address_space *mapping, 3953 loff_t const holebegin, loff_t const holelen, int even_cows) 3954 { 3955 pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT; 3956 pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT; 3957 3958 /* Check for overflow. 
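* On 32-bit kernels the byte offsets (loff_t) are wider than the page
* offsets (pgoff_t/unsigned long), so a hole near the end of a huge
* sparse file may describe more pages than hlen can hold. If the
* computed hole end overflows, clamp hlen so the range extends from
* hba up to the largest representable page index.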
*/ 3959 if (sizeof(holelen) > sizeof(hlen)) { 3960 long long holeend = 3961 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 3962 if (holeend & ~(long long)ULONG_MAX) 3963 hlen = ULONG_MAX - hba + 1; 3964 } 3965 3966 unmap_mapping_pages(mapping, hba, hlen, even_cows); 3967 } 3968 EXPORT_SYMBOL(unmap_mapping_range); 3969 3970 /* 3971 * Restore a potential device exclusive pte to a working pte entry 3972 */ 3973 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) 3974 { 3975 struct folio *folio = page_folio(vmf->page); 3976 struct vm_area_struct *vma = vmf->vma; 3977 struct mmu_notifier_range range; 3978 vm_fault_t ret; 3979 3980 /* 3981 * We need a reference to lock the folio because we don't hold 3982 * the PTL so a racing thread can remove the device-exclusive 3983 * entry and unmap it. If the folio is free the entry must 3984 * have been removed already. If it happens to have already 3985 * been re-allocated after being freed all we do is lock and 3986 * unlock it. 3987 */ 3988 if (!folio_try_get(folio)) 3989 return 0; 3990 3991 ret = folio_lock_or_retry(folio, vmf); 3992 if (ret) { 3993 folio_put(folio); 3994 return ret; 3995 } 3996 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, 3997 vma->vm_mm, vmf->address & PAGE_MASK, 3998 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); 3999 mmu_notifier_invalidate_range_start(&range); 4000 4001 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4002 &vmf->ptl); 4003 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 4004 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); 4005 4006 if (vmf->pte) 4007 pte_unmap_unlock(vmf->pte, vmf->ptl); 4008 folio_unlock(folio); 4009 folio_put(folio); 4010 4011 mmu_notifier_invalidate_range_end(&range); 4012 return 0; 4013 } 4014 4015 static inline bool should_try_to_free_swap(struct folio *folio, 4016 struct vm_area_struct *vma, 4017 unsigned int fault_flags) 4018 { 4019 if (!folio_test_swapcache(folio)) 4020 return false; 4021 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || 4022 folio_test_mlocked(folio)) 4023 return true; 4024 /* 4025 * If we want to map a page that's in the swapcache writable, we 4026 * have to detect via the refcount if we're really the exclusive 4027 * user. Try freeing the swapcache to get rid of the swapcache 4028 * reference only in case it's likely that we'll be the exclusive user. 4029 */ 4030 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && 4031 folio_ref_count(folio) == (1 + folio_nr_pages(folio)); 4032 } 4033 4034 static vm_fault_t pte_marker_clear(struct vm_fault *vmf) 4035 { 4036 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 4037 vmf->address, &vmf->ptl); 4038 if (!vmf->pte) 4039 return 0; 4040 /* 4041 * Be careful so that we will only recover a special uffd-wp pte into a 4042 * none pte. Otherwise it means the pte could have changed, so retry. 4043 * 4044 * This should also cover the case where e.g. the pte changed 4045 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED. 4046 * So the is_pte_marker() check is not enough to safely drop the pte.
4047 */ 4048 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) 4049 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); 4050 pte_unmap_unlock(vmf->pte, vmf->ptl); 4051 return 0; 4052 } 4053 4054 static vm_fault_t do_pte_missing(struct vm_fault *vmf) 4055 { 4056 if (vma_is_anonymous(vmf->vma)) 4057 return do_anonymous_page(vmf); 4058 else 4059 return do_fault(vmf); 4060 } 4061 4062 /* 4063 * This is actually a page-missing access, but with uffd-wp special pte 4064 * installed. It means this pte was wr-protected before being unmapped. 4065 */ 4066 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) 4067 { 4068 /* 4069 * Just in case there're leftover special ptes even after the region 4070 * got unregistered - we can simply clear them. 4071 */ 4072 if (unlikely(!userfaultfd_wp(vmf->vma))) 4073 return pte_marker_clear(vmf); 4074 4075 return do_pte_missing(vmf); 4076 } 4077 4078 static vm_fault_t handle_pte_marker(struct vm_fault *vmf) 4079 { 4080 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); 4081 unsigned long marker = pte_marker_get(entry); 4082 4083 /* 4084 * PTE markers should never be empty. If anything weird happened, 4085 * the best thing to do is to kill the process along with its mm. 4086 */ 4087 if (WARN_ON_ONCE(!marker)) 4088 return VM_FAULT_SIGBUS; 4089 4090 /* Higher priority than uffd-wp when data corrupted */ 4091 if (marker & PTE_MARKER_POISONED) 4092 return VM_FAULT_HWPOISON; 4093 4094 /* Hitting a guard page is always a fatal condition. */ 4095 if (marker & PTE_MARKER_GUARD) 4096 return VM_FAULT_SIGSEGV; 4097 4098 if (pte_marker_entry_uffd_wp(entry)) 4099 return pte_marker_handle_uffd_wp(vmf); 4100 4101 /* This is an unknown pte marker */ 4102 return VM_FAULT_SIGBUS; 4103 } 4104 4105 static struct folio *__alloc_swap_folio(struct vm_fault *vmf) 4106 { 4107 struct vm_area_struct *vma = vmf->vma; 4108 struct folio *folio; 4109 swp_entry_t entry; 4110 4111 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address); 4112 if (!folio) 4113 return NULL; 4114 4115 entry = pte_to_swp_entry(vmf->orig_pte); 4116 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, 4117 GFP_KERNEL, entry)) { 4118 folio_put(folio); 4119 return NULL; 4120 } 4121 4122 return folio; 4123 } 4124 4125 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4126 static inline int non_swapcache_batch(swp_entry_t entry, int max_nr) 4127 { 4128 struct swap_info_struct *si = swp_swap_info(entry); 4129 pgoff_t offset = swp_offset(entry); 4130 int i; 4131 4132 /* 4133 * While allocating a large folio and doing swap_read_folio, which is 4134 * the case the being faulted pte doesn't have swapcache. We need to 4135 * ensure all PTEs have no cache as well, otherwise, we might go to 4136 * swap devices while the content is in swapcache. 4137 */ 4138 for (i = 0; i < max_nr; i++) { 4139 if ((si->swap_map[offset + i] & SWAP_HAS_CACHE)) 4140 return i; 4141 } 4142 4143 return i; 4144 } 4145 4146 /* 4147 * Check if the PTEs within a range are contiguous swap entries 4148 * and have consistent swapcache, zeromap. 
4149 */ 4150 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) 4151 { 4152 unsigned long addr; 4153 swp_entry_t entry; 4154 int idx; 4155 pte_t pte; 4156 4157 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); 4158 idx = (vmf->address - addr) / PAGE_SIZE; 4159 pte = ptep_get(ptep); 4160 4161 if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx))) 4162 return false; 4163 entry = pte_to_swp_entry(pte); 4164 if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages) 4165 return false; 4166 4167 /* 4168 * swap_read_folio() can't handle the case a large folio is hybridly 4169 * from different backends. And they are likely corner cases. Similar 4170 * things might be added once zswap support large folios. 4171 */ 4172 if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages)) 4173 return false; 4174 if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages)) 4175 return false; 4176 4177 return true; 4178 } 4179 4180 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset, 4181 unsigned long addr, 4182 unsigned long orders) 4183 { 4184 int order, nr; 4185 4186 order = highest_order(orders); 4187 4188 /* 4189 * To swap in a THP with nr pages, we require that its first swap_offset 4190 * is aligned with that number, as it was when the THP was swapped out. 4191 * This helps filter out most invalid entries. 4192 */ 4193 while (orders) { 4194 nr = 1 << order; 4195 if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr) 4196 break; 4197 order = next_order(&orders, order); 4198 } 4199 4200 return orders; 4201 } 4202 4203 static struct folio *alloc_swap_folio(struct vm_fault *vmf) 4204 { 4205 struct vm_area_struct *vma = vmf->vma; 4206 unsigned long orders; 4207 struct folio *folio; 4208 unsigned long addr; 4209 swp_entry_t entry; 4210 spinlock_t *ptl; 4211 pte_t *pte; 4212 gfp_t gfp; 4213 int order; 4214 4215 /* 4216 * If uffd is active for the vma we need per-page fault fidelity to 4217 * maintain the uffd semantics. 4218 */ 4219 if (unlikely(userfaultfd_armed(vma))) 4220 goto fallback; 4221 4222 /* 4223 * A large swapped out folio could be partially or fully in zswap. We 4224 * lack handling for such cases, so fallback to swapping in order-0 4225 * folio. 4226 */ 4227 if (!zswap_never_enabled()) 4228 goto fallback; 4229 4230 entry = pte_to_swp_entry(vmf->orig_pte); 4231 /* 4232 * Get a list of all the (large) orders below PMD_ORDER that are enabled 4233 * and suitable for swapping THP. 4234 */ 4235 orders = thp_vma_allowable_orders(vma, vma->vm_flags, 4236 TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1); 4237 orders = thp_vma_suitable_orders(vma, vmf->address, orders); 4238 orders = thp_swap_suitable_orders(swp_offset(entry), 4239 vmf->address, orders); 4240 4241 if (!orders) 4242 goto fallback; 4243 4244 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 4245 vmf->address & PMD_MASK, &ptl); 4246 if (unlikely(!pte)) 4247 goto fallback; 4248 4249 /* 4250 * For do_swap_page, find the highest order where the aligned range is 4251 * completely swap entries with contiguous swap offsets. 4252 */ 4253 order = highest_order(orders); 4254 while (orders) { 4255 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 4256 if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order)) 4257 break; 4258 order = next_order(&orders, order); 4259 } 4260 4261 pte_unmap_unlock(pte, ptl); 4262 4263 /* Try allocating the highest of the remaining orders. 
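* If either the folio allocation or the memcg swap-in charge fails for
* an order, record the fallback in the per-order mTHP counters and try
* the next lower enabled order; once all large orders have failed we
* take the order-0 __alloc_swap_folio() path below.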
*/ 4264 gfp = vma_thp_gfp_mask(vma); 4265 while (orders) { 4266 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 4267 folio = vma_alloc_folio(gfp, order, vma, addr); 4268 if (folio) { 4269 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, 4270 gfp, entry)) 4271 return folio; 4272 count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE); 4273 folio_put(folio); 4274 } 4275 count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK); 4276 order = next_order(&orders, order); 4277 } 4278 4279 fallback: 4280 return __alloc_swap_folio(vmf); 4281 } 4282 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 4283 static struct folio *alloc_swap_folio(struct vm_fault *vmf) 4284 { 4285 return __alloc_swap_folio(vmf); 4286 } 4287 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 4288 4289 static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq); 4290 4291 /* 4292 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4293 * but allow concurrent faults), and pte mapped but not yet locked. 4294 * We return with pte unmapped and unlocked. 4295 * 4296 * We return with the mmap_lock locked or unlocked in the same cases 4297 * as does filemap_fault(). 4298 */ 4299 vm_fault_t do_swap_page(struct vm_fault *vmf) 4300 { 4301 struct vm_area_struct *vma = vmf->vma; 4302 struct folio *swapcache, *folio = NULL; 4303 DECLARE_WAITQUEUE(wait, current); 4304 struct page *page; 4305 struct swap_info_struct *si = NULL; 4306 rmap_t rmap_flags = RMAP_NONE; 4307 bool need_clear_cache = false; 4308 bool exclusive = false; 4309 swp_entry_t entry; 4310 pte_t pte; 4311 vm_fault_t ret = 0; 4312 void *shadow = NULL; 4313 int nr_pages; 4314 unsigned long page_idx; 4315 unsigned long address; 4316 pte_t *ptep; 4317 4318 if (!pte_unmap_same(vmf)) 4319 goto out; 4320 4321 entry = pte_to_swp_entry(vmf->orig_pte); 4322 if (unlikely(non_swap_entry(entry))) { 4323 if (is_migration_entry(entry)) { 4324 migration_entry_wait(vma->vm_mm, vmf->pmd, 4325 vmf->address); 4326 } else if (is_device_exclusive_entry(entry)) { 4327 vmf->page = pfn_swap_entry_to_page(entry); 4328 ret = remove_device_exclusive_entry(vmf); 4329 } else if (is_device_private_entry(entry)) { 4330 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { 4331 /* 4332 * migrate_to_ram is not yet ready to operate 4333 * under VMA lock. 4334 */ 4335 vma_end_read(vma); 4336 ret = VM_FAULT_RETRY; 4337 goto out; 4338 } 4339 4340 vmf->page = pfn_swap_entry_to_page(entry); 4341 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4342 vmf->address, &vmf->ptl); 4343 if (unlikely(!vmf->pte || 4344 !pte_same(ptep_get(vmf->pte), 4345 vmf->orig_pte))) 4346 goto unlock; 4347 4348 /* 4349 * Get a page reference while we know the page can't be 4350 * freed. 4351 */ 4352 if (trylock_page(vmf->page)) { 4353 get_page(vmf->page); 4354 pte_unmap_unlock(vmf->pte, vmf->ptl); 4355 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); 4356 unlock_page(vmf->page); 4357 put_page(vmf->page); 4358 } else { 4359 pte_unmap_unlock(vmf->pte, vmf->ptl); 4360 } 4361 } else if (is_hwpoison_entry(entry)) { 4362 ret = VM_FAULT_HWPOISON; 4363 } else if (is_pte_marker_entry(entry)) { 4364 ret = handle_pte_marker(vmf); 4365 } else { 4366 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); 4367 ret = VM_FAULT_SIGBUS; 4368 } 4369 goto out; 4370 } 4371 4372 /* Prevent swapoff from happening to us. 
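* get_swap_device() takes a reference on the swap device; if it fails,
* the entry has gone stale under us (e.g. a concurrent swapoff), so
* there is nothing left to do and we bail out.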
*/ 4373 si = get_swap_device(entry); 4374 if (unlikely(!si)) 4375 goto out; 4376 4377 folio = swap_cache_get_folio(entry, vma, vmf->address); 4378 if (folio) 4379 page = folio_file_page(folio, swp_offset(entry)); 4380 swapcache = folio; 4381 4382 if (!folio) { 4383 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && 4384 __swap_count(entry) == 1) { 4385 /* skip swapcache */ 4386 folio = alloc_swap_folio(vmf); 4387 if (folio) { 4388 __folio_set_locked(folio); 4389 __folio_set_swapbacked(folio); 4390 4391 nr_pages = folio_nr_pages(folio); 4392 if (folio_test_large(folio)) 4393 entry.val = ALIGN_DOWN(entry.val, nr_pages); 4394 /* 4395 * Prevent parallel swapin from proceeding with 4396 * the cache flag. Otherwise, another thread 4397 * may finish swapin first, free the entry, and 4398 * swapout reusing the same entry. It's 4399 * undetectable as pte_same() returns true due 4400 * to entry reuse. 4401 */ 4402 if (swapcache_prepare(entry, nr_pages)) { 4403 /* 4404 * Relax a bit to prevent rapid 4405 * repeated page faults. 4406 */ 4407 add_wait_queue(&swapcache_wq, &wait); 4408 schedule_timeout_uninterruptible(1); 4409 remove_wait_queue(&swapcache_wq, &wait); 4410 goto out_page; 4411 } 4412 need_clear_cache = true; 4413 4414 mem_cgroup_swapin_uncharge_swap(entry, nr_pages); 4415 4416 shadow = get_shadow_from_swap_cache(entry); 4417 if (shadow) 4418 workingset_refault(folio, shadow); 4419 4420 folio_add_lru(folio); 4421 4422 /* To provide entry to swap_read_folio() */ 4423 folio->swap = entry; 4424 swap_read_folio(folio, NULL); 4425 folio->private = NULL; 4426 } 4427 } else { 4428 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, 4429 vmf); 4430 swapcache = folio; 4431 } 4432 4433 if (!folio) { 4434 /* 4435 * Back out if somebody else faulted in this pte 4436 * while we released the pte lock. 4437 */ 4438 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4439 vmf->address, &vmf->ptl); 4440 if (likely(vmf->pte && 4441 pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 4442 ret = VM_FAULT_OOM; 4443 goto unlock; 4444 } 4445 4446 /* Had to read the page from swap area: Major fault */ 4447 ret = VM_FAULT_MAJOR; 4448 count_vm_event(PGMAJFAULT); 4449 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 4450 page = folio_file_page(folio, swp_offset(entry)); 4451 } else if (PageHWPoison(page)) { 4452 /* 4453 * hwpoisoned dirty swapcache pages are kept for killing 4454 * owner processes (which may be unknown at hwpoison time) 4455 */ 4456 ret = VM_FAULT_HWPOISON; 4457 goto out_release; 4458 } 4459 4460 ret |= folio_lock_or_retry(folio, vmf); 4461 if (ret & VM_FAULT_RETRY) 4462 goto out_release; 4463 4464 if (swapcache) { 4465 /* 4466 * Make sure folio_free_swap() or swapoff did not release the 4467 * swapcache from under us. The page pin, and pte_same test 4468 * below, are not enough to exclude that. Even if it is still 4469 * swapcache, we need to check that the page's swap has not 4470 * changed. 4471 */ 4472 if (unlikely(!folio_test_swapcache(folio) || 4473 page_swap_entry(page).val != entry.val)) 4474 goto out_page; 4475 4476 /* 4477 * KSM sometimes has to copy on read faults, for example, if 4478 * page->index of !PageKSM() pages would be nonlinear inside the 4479 * anon VMA -- PageKSM() is lost on actual swapout. 
4480 */ 4481 folio = ksm_might_need_to_copy(folio, vma, vmf->address); 4482 if (unlikely(!folio)) { 4483 ret = VM_FAULT_OOM; 4484 folio = swapcache; 4485 goto out_page; 4486 } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { 4487 ret = VM_FAULT_HWPOISON; 4488 folio = swapcache; 4489 goto out_page; 4490 } 4491 if (folio != swapcache) 4492 page = folio_page(folio, 0); 4493 4494 /* 4495 * If we want to map a page that's in the swapcache writable, we 4496 * have to detect via the refcount if we're really the exclusive 4497 * owner. Try removing the extra reference from the local LRU 4498 * caches if required. 4499 */ 4500 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && 4501 !folio_test_ksm(folio) && !folio_test_lru(folio)) 4502 lru_add_drain(); 4503 } 4504 4505 folio_throttle_swaprate(folio, GFP_KERNEL); 4506 4507 /* 4508 * Back out if somebody else already faulted in this pte. 4509 */ 4510 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4511 &vmf->ptl); 4512 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 4513 goto out_nomap; 4514 4515 if (unlikely(!folio_test_uptodate(folio))) { 4516 ret = VM_FAULT_SIGBUS; 4517 goto out_nomap; 4518 } 4519 4520 /* allocated large folios for SWP_SYNCHRONOUS_IO */ 4521 if (folio_test_large(folio) && !folio_test_swapcache(folio)) { 4522 unsigned long nr = folio_nr_pages(folio); 4523 unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE); 4524 unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE; 4525 pte_t *folio_ptep = vmf->pte - idx; 4526 pte_t folio_pte = ptep_get(folio_ptep); 4527 4528 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || 4529 swap_pte_batch(folio_ptep, nr, folio_pte) != nr) 4530 goto out_nomap; 4531 4532 page_idx = idx; 4533 address = folio_start; 4534 ptep = folio_ptep; 4535 goto check_folio; 4536 } 4537 4538 nr_pages = 1; 4539 page_idx = 0; 4540 address = vmf->address; 4541 ptep = vmf->pte; 4542 if (folio_test_large(folio) && folio_test_swapcache(folio)) { 4543 int nr = folio_nr_pages(folio); 4544 unsigned long idx = folio_page_idx(folio, page); 4545 unsigned long folio_start = address - idx * PAGE_SIZE; 4546 unsigned long folio_end = folio_start + nr * PAGE_SIZE; 4547 pte_t *folio_ptep; 4548 pte_t folio_pte; 4549 4550 if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) 4551 goto check_folio; 4552 if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end))) 4553 goto check_folio; 4554 4555 folio_ptep = vmf->pte - idx; 4556 folio_pte = ptep_get(folio_ptep); 4557 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || 4558 swap_pte_batch(folio_ptep, nr, folio_pte) != nr) 4559 goto check_folio; 4560 4561 page_idx = idx; 4562 address = folio_start; 4563 ptep = folio_ptep; 4564 nr_pages = nr; 4565 entry = folio->swap; 4566 page = &folio->page; 4567 } 4568 4569 check_folio: 4570 /* 4571 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte 4572 * must never point at an anonymous page in the swapcache that is 4573 * PG_anon_exclusive. Sanity check that this holds and especially, that 4574 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity 4575 * check after taking the PT lock and making sure that nobody 4576 * concurrently faulted in this page and set PG_anon_exclusive. 
4577 */ 4578 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio)); 4579 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page)); 4580 4581 /* 4582 * Check under PT lock (to protect against concurrent fork() sharing 4583 * the swap entry concurrently) for certainly exclusive pages. 4584 */ 4585 if (!folio_test_ksm(folio)) { 4586 exclusive = pte_swp_exclusive(vmf->orig_pte); 4587 if (folio != swapcache) { 4588 /* 4589 * We have a fresh page that is not exposed to the 4590 * swapcache -> certainly exclusive. 4591 */ 4592 exclusive = true; 4593 } else if (exclusive && folio_test_writeback(folio) && 4594 data_race(si->flags & SWP_STABLE_WRITES)) { 4595 /* 4596 * This is tricky: not all swap backends support 4597 * concurrent page modifications while under writeback. 4598 * 4599 * So if we stumble over such a page in the swapcache 4600 * we must not set the page exclusive, otherwise we can 4601 * map it writable without further checks and modify it 4602 * while still under writeback. 4603 * 4604 * For these problematic swap backends, simply drop the 4605 * exclusive marker: this is perfectly fine as we start 4606 * writeback only if we fully unmapped the page and 4607 * there are no unexpected references on the page after 4608 * unmapping succeeded. After fully unmapped, no 4609 * further GUP references (FOLL_GET and FOLL_PIN) can 4610 * appear, so dropping the exclusive marker and mapping 4611 * it only R/O is fine. 4612 */ 4613 exclusive = false; 4614 } 4615 } 4616 4617 /* 4618 * Some architectures may have to restore extra metadata to the page 4619 * when reading from swap. This metadata may be indexed by swap entry 4620 * so this must be called before swap_free(). 4621 */ 4622 arch_swap_restore(folio_swap(entry, folio), folio); 4623 4624 /* 4625 * Remove the swap entry and conditionally try to free up the swapcache. 4626 * We're already holding a reference on the page but haven't mapped it 4627 * yet. 4628 */ 4629 swap_free_nr(entry, nr_pages); 4630 if (should_try_to_free_swap(folio, vma, vmf->flags)) 4631 folio_free_swap(folio); 4632 4633 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); 4634 add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); 4635 pte = mk_pte(page, vma->vm_page_prot); 4636 if (pte_swp_soft_dirty(vmf->orig_pte)) 4637 pte = pte_mksoft_dirty(pte); 4638 if (pte_swp_uffd_wp(vmf->orig_pte)) 4639 pte = pte_mkuffd_wp(pte); 4640 4641 /* 4642 * Same logic as in do_wp_page(); however, optimize for pages that are 4643 * certainly not shared either because we just allocated them without 4644 * exposing them to the swapcache or because the swap entry indicates 4645 * exclusivity. 
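* Such certainly-exclusive pages can be mapped writable right away,
* provided the VMA is writable and neither uffd-wp nor soft-dirty
* write protection applies, sparing us a follow-up write fault.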
*/ 4647 if (!folio_test_ksm(folio) && 4648 (exclusive || folio_ref_count(folio) == 1)) { 4649 if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) && 4650 !pte_needs_soft_dirty_wp(vma, pte)) { 4651 pte = pte_mkwrite(pte, vma); 4652 if (vmf->flags & FAULT_FLAG_WRITE) { 4653 pte = pte_mkdirty(pte); 4654 vmf->flags &= ~FAULT_FLAG_WRITE; 4655 } 4656 } 4657 rmap_flags |= RMAP_EXCLUSIVE; 4658 } 4659 folio_ref_add(folio, nr_pages - 1); 4660 flush_icache_pages(vma, page, nr_pages); 4661 vmf->orig_pte = pte_advance_pfn(pte, page_idx); 4662 4663 /* ksm created a completely new copy */ 4664 if (unlikely(folio != swapcache && swapcache)) { 4665 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); 4666 folio_add_lru_vma(folio, vma); 4667 } else if (!folio_test_anon(folio)) { 4668 /* 4669 * We currently only expect small !anon folios which are either 4670 * fully exclusive or fully shared, or newly allocated large 4671 * folios which are fully exclusive. If we ever get large 4672 * folios within swapcache here, we have to be careful. 4673 */ 4674 VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio)); 4675 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 4676 folio_add_new_anon_rmap(folio, vma, address, rmap_flags); 4677 } else { 4678 folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, 4679 rmap_flags); 4680 } 4681 4682 VM_BUG_ON(!folio_test_anon(folio) || 4683 (pte_write(pte) && !PageAnonExclusive(page))); 4684 set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); 4685 arch_do_swap_page_nr(vma->vm_mm, vma, address, 4686 pte, pte, nr_pages); 4687 4688 folio_unlock(folio); 4689 if (folio != swapcache && swapcache) { 4690 /* 4691 * Hold the lock to prevent the swap entry from being reused 4692 * until we take the PT lock for the pte_same() check 4693 * (to avoid false positives from pte_same). For 4694 * further safety, release the lock after the swap_free 4695 * so that the swap count won't change under a 4696 * parallel locked swapcache.
4697 */ 4698 folio_unlock(swapcache); 4699 folio_put(swapcache); 4700 } 4701 4702 if (vmf->flags & FAULT_FLAG_WRITE) { 4703 ret |= do_wp_page(vmf); 4704 if (ret & VM_FAULT_ERROR) 4705 ret &= VM_FAULT_ERROR; 4706 goto out; 4707 } 4708 4709 /* No need to invalidate - it was non-present before */ 4710 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); 4711 unlock: 4712 if (vmf->pte) 4713 pte_unmap_unlock(vmf->pte, vmf->ptl); 4714 out: 4715 /* Clear the swap cache pin for direct swapin after PTL unlock */ 4716 if (need_clear_cache) { 4717 swapcache_clear(si, entry, nr_pages); 4718 if (waitqueue_active(&swapcache_wq)) 4719 wake_up(&swapcache_wq); 4720 } 4721 if (si) 4722 put_swap_device(si); 4723 return ret; 4724 out_nomap: 4725 if (vmf->pte) 4726 pte_unmap_unlock(vmf->pte, vmf->ptl); 4727 out_page: 4728 folio_unlock(folio); 4729 out_release: 4730 folio_put(folio); 4731 if (folio != swapcache && swapcache) { 4732 folio_unlock(swapcache); 4733 folio_put(swapcache); 4734 } 4735 if (need_clear_cache) { 4736 swapcache_clear(si, entry, nr_pages); 4737 if (waitqueue_active(&swapcache_wq)) 4738 wake_up(&swapcache_wq); 4739 } 4740 if (si) 4741 put_swap_device(si); 4742 return ret; 4743 } 4744 4745 static bool pte_range_none(pte_t *pte, int nr_pages) 4746 { 4747 int i; 4748 4749 for (i = 0; i < nr_pages; i++) { 4750 if (!pte_none(ptep_get_lockless(pte + i))) 4751 return false; 4752 } 4753 4754 return true; 4755 } 4756 4757 static struct folio *alloc_anon_folio(struct vm_fault *vmf) 4758 { 4759 struct vm_area_struct *vma = vmf->vma; 4760 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4761 unsigned long orders; 4762 struct folio *folio; 4763 unsigned long addr; 4764 pte_t *pte; 4765 gfp_t gfp; 4766 int order; 4767 4768 /* 4769 * If uffd is active for the vma we need per-page fault fidelity to 4770 * maintain the uffd semantics. 4771 */ 4772 if (unlikely(userfaultfd_armed(vma))) 4773 goto fallback; 4774 4775 /* 4776 * Get a list of all the (large) orders below PMD_ORDER that are enabled 4777 * for this vma. Then filter out the orders that can't be allocated over 4778 * the faulting address and still be fully contained in the vma. 4779 */ 4780 orders = thp_vma_allowable_orders(vma, vma->vm_flags, 4781 TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1); 4782 orders = thp_vma_suitable_orders(vma, vmf->address, orders); 4783 4784 if (!orders) 4785 goto fallback; 4786 4787 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); 4788 if (!pte) 4789 return ERR_PTR(-EAGAIN); 4790 4791 /* 4792 * Find the highest order where the aligned range is completely 4793 * pte_none(). Note that all remaining orders will be completely 4794 * pte_none(). 4795 */ 4796 order = highest_order(orders); 4797 while (orders) { 4798 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 4799 if (pte_range_none(pte + pte_index(addr), 1 << order)) 4800 break; 4801 order = next_order(&orders, order); 4802 } 4803 4804 pte_unmap(pte); 4805 4806 if (!orders) 4807 goto fallback; 4808 4809 /* Try allocating the highest of the remaining orders. 
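* Both allocation failures and memcg charge failures are accounted in
* the per-order mTHP statistics before dropping to the next smaller
* order; if no large order succeeds, the order-0 folio_prealloc()
* fallback below is used.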
*/ 4810 gfp = vma_thp_gfp_mask(vma); 4811 while (orders) { 4812 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 4813 folio = vma_alloc_folio(gfp, order, vma, addr); 4814 if (folio) { 4815 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { 4816 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); 4817 folio_put(folio); 4818 goto next; 4819 } 4820 folio_throttle_swaprate(folio, gfp); 4821 /* 4822 * When a folio is not zeroed during allocation 4823 * (__GFP_ZERO not used) or user folios require special 4824 * handling, folio_zero_user() is used to make sure 4825 * that the page corresponding to the faulting address 4826 * will be hot in the cache after zeroing. 4827 */ 4828 if (user_alloc_needs_zeroing()) 4829 folio_zero_user(folio, vmf->address); 4830 return folio; 4831 } 4832 next: 4833 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK); 4834 order = next_order(&orders, order); 4835 } 4836 4837 fallback: 4838 #endif 4839 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); 4840 } 4841 4842 /* 4843 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4844 * but allow concurrent faults), and pte mapped but not yet locked. 4845 * We return with mmap_lock still held, but pte unmapped and unlocked. 4846 */ 4847 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) 4848 { 4849 struct vm_area_struct *vma = vmf->vma; 4850 unsigned long addr = vmf->address; 4851 struct folio *folio; 4852 vm_fault_t ret = 0; 4853 int nr_pages = 1; 4854 pte_t entry; 4855 4856 /* File mapping without ->vm_ops ? */ 4857 if (vma->vm_flags & VM_SHARED) 4858 return VM_FAULT_SIGBUS; 4859 4860 /* 4861 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can 4862 * be distinguished from a transient failure of pte_offset_map(). 4863 */ 4864 if (pte_alloc(vma->vm_mm, vmf->pmd)) 4865 return VM_FAULT_OOM; 4866 4867 /* Use the zero-page for reads */ 4868 if (!(vmf->flags & FAULT_FLAG_WRITE) && 4869 !mm_forbids_zeropage(vma->vm_mm)) { 4870 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), 4871 vma->vm_page_prot)); 4872 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4873 vmf->address, &vmf->ptl); 4874 if (!vmf->pte) 4875 goto unlock; 4876 if (vmf_pte_changed(vmf)) { 4877 update_mmu_tlb(vma, vmf->address, vmf->pte); 4878 goto unlock; 4879 } 4880 ret = check_stable_address_space(vma->vm_mm); 4881 if (ret) 4882 goto unlock; 4883 /* Deliver the page fault to userland, check inside PT lock */ 4884 if (userfaultfd_missing(vma)) { 4885 pte_unmap_unlock(vmf->pte, vmf->ptl); 4886 return handle_userfault(vmf, VM_UFFD_MISSING); 4887 } 4888 goto setpte; 4889 } 4890 4891 /* Allocate our own private page. */ 4892 ret = vmf_anon_prepare(vmf); 4893 if (ret) 4894 return ret; 4895 /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */ 4896 folio = alloc_anon_folio(vmf); 4897 if (IS_ERR(folio)) 4898 return 0; 4899 if (!folio) 4900 goto oom; 4901 4902 nr_pages = folio_nr_pages(folio); 4903 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); 4904 4905 /* 4906 * The memory barrier inside __folio_mark_uptodate makes sure that 4907 * preceding stores to the page contents become visible before 4908 * the set_pte_at() write. 
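* Without this ordering another task could observe the freshly
* installed pte and read stale, not-yet-initialized page contents.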
4909 */ 4910 __folio_mark_uptodate(folio); 4911 4912 entry = mk_pte(&folio->page, vma->vm_page_prot); 4913 entry = pte_sw_mkyoung(entry); 4914 if (vma->vm_flags & VM_WRITE) 4915 entry = pte_mkwrite(pte_mkdirty(entry), vma); 4916 4917 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); 4918 if (!vmf->pte) 4919 goto release; 4920 if (nr_pages == 1 && vmf_pte_changed(vmf)) { 4921 update_mmu_tlb(vma, addr, vmf->pte); 4922 goto release; 4923 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { 4924 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); 4925 goto release; 4926 } 4927 4928 ret = check_stable_address_space(vma->vm_mm); 4929 if (ret) 4930 goto release; 4931 4932 /* Deliver the page fault to userland, check inside PT lock */ 4933 if (userfaultfd_missing(vma)) { 4934 pte_unmap_unlock(vmf->pte, vmf->ptl); 4935 folio_put(folio); 4936 return handle_userfault(vmf, VM_UFFD_MISSING); 4937 } 4938 4939 folio_ref_add(folio, nr_pages - 1); 4940 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); 4941 count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); 4942 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); 4943 folio_add_lru_vma(folio, vma); 4944 setpte: 4945 if (vmf_orig_pte_uffd_wp(vmf)) 4946 entry = pte_mkuffd_wp(entry); 4947 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); 4948 4949 /* No need to invalidate - it was non-present before */ 4950 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); 4951 unlock: 4952 if (vmf->pte) 4953 pte_unmap_unlock(vmf->pte, vmf->ptl); 4954 return ret; 4955 release: 4956 folio_put(folio); 4957 goto unlock; 4958 oom: 4959 return VM_FAULT_OOM; 4960 } 4961 4962 /* 4963 * The mmap_lock must have been held on entry, and may have been 4964 * released depending on flags and vma->vm_ops->fault() return value. 4965 * See filemap_fault() and __lock_page_retry(). 4966 */ 4967 static vm_fault_t __do_fault(struct vm_fault *vmf) 4968 { 4969 struct vm_area_struct *vma = vmf->vma; 4970 struct folio *folio; 4971 vm_fault_t ret; 4972 4973 /* 4974 * Preallocate pte before we take page_lock because this might lead to 4975 * deadlocks for memcg reclaim which waits for pages under writeback: 4976 * lock_page(A) 4977 * SetPageWriteback(A) 4978 * unlock_page(A) 4979 * lock_page(B) 4980 * lock_page(B) 4981 * pte_alloc_one 4982 * shrink_folio_list 4983 * wait_on_page_writeback(A) 4984 * SetPageWriteback(B) 4985 * unlock_page(B) 4986 * # flush A, B to clear the writeback 4987 */ 4988 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { 4989 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 4990 if (!vmf->prealloc_pte) 4991 return VM_FAULT_OOM; 4992 } 4993 4994 ret = vma->vm_ops->fault(vmf); 4995 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 4996 VM_FAULT_DONE_COW))) 4997 return ret; 4998 4999 folio = page_folio(vmf->page); 5000 if (unlikely(PageHWPoison(vmf->page))) { 5001 vm_fault_t poisonret = VM_FAULT_HWPOISON; 5002 if (ret & VM_FAULT_LOCKED) { 5003 if (page_mapped(vmf->page)) 5004 unmap_mapping_folio(folio); 5005 /* Retry if a clean folio was removed from the cache. 
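* mapping_evict_folio() only succeeds for a clean, otherwise unused
* folio; if the poisoned folio was evicted, returning VM_FAULT_NOPAGE
* lets the fault be retried with a fresh copy, otherwise we report
* VM_FAULT_HWPOISON.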
*/ 5006 if (mapping_evict_folio(folio->mapping, folio)) 5007 poisonret = VM_FAULT_NOPAGE; 5008 folio_unlock(folio); 5009 } 5010 folio_put(folio); 5011 vmf->page = NULL; 5012 return poisonret; 5013 } 5014 5015 if (unlikely(!(ret & VM_FAULT_LOCKED))) 5016 folio_lock(folio); 5017 else 5018 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page); 5019 5020 return ret; 5021 } 5022 5023 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5024 static void deposit_prealloc_pte(struct vm_fault *vmf) 5025 { 5026 struct vm_area_struct *vma = vmf->vma; 5027 5028 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 5029 /* 5030 * We are going to consume the prealloc table, 5031 * count that as nr_ptes. 5032 */ 5033 mm_inc_nr_ptes(vma->vm_mm); 5034 vmf->prealloc_pte = NULL; 5035 } 5036 5037 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) 5038 { 5039 struct folio *folio = page_folio(page); 5040 struct vm_area_struct *vma = vmf->vma; 5041 bool write = vmf->flags & FAULT_FLAG_WRITE; 5042 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 5043 pmd_t entry; 5044 vm_fault_t ret = VM_FAULT_FALLBACK; 5045 5046 /* 5047 * It is too late to allocate a small folio, we already have a large 5048 * folio in the pagecache: especially s390 KVM cannot tolerate any 5049 * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any 5050 * PMD mappings if THPs are disabled. 5051 */ 5052 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags)) 5053 return ret; 5054 5055 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) 5056 return ret; 5057 5058 if (folio_order(folio) != HPAGE_PMD_ORDER) 5059 return ret; 5060 page = &folio->page; 5061 5062 /* 5063 * Just back off if any subpage of a THP is corrupted, otherwise 5064 * the corrupted page may be mapped by a PMD silently and escape the 5065 * check. This kind of THP can only be PTE mapped. Access to 5066 * the corrupted subpage should trigger SIGBUS as expected. 5067 */ 5068 if (unlikely(folio_test_has_hwpoisoned(folio))) 5069 return ret; 5070 5071 /* 5072 * Archs like ppc64 need additional space to store information 5073 * related to the pte entry. Use the preallocated table for that. 5074 */ 5075 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { 5076 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 5077 if (!vmf->prealloc_pte) 5078 return VM_FAULT_OOM; 5079 } 5080 5081 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 5082 if (unlikely(!pmd_none(*vmf->pmd))) 5083 goto out; 5084 5085 flush_icache_pages(vma, page, HPAGE_PMD_NR); 5086 5087 entry = mk_huge_pmd(page, vma->vm_page_prot); 5088 if (write) 5089 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 5090 5091 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); 5092 folio_add_file_rmap_pmd(folio, page, vma); 5093 5094 /* 5095 * deposit and withdraw with pmd lock held 5096 */ 5097 if (arch_needs_pgtable_deposit()) 5098 deposit_prealloc_pte(vmf); 5099 5100 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 5101 5102 update_mmu_cache_pmd(vma, haddr, vmf->pmd); 5103 5104 /* fault is handled */ 5105 ret = 0; 5106 count_vm_event(THP_FILE_MAPPED); 5107 out: 5108 spin_unlock(vmf->ptl); 5109 return ret; 5110 } 5111 #else 5112 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) 5113 { 5114 return VM_FAULT_FALLBACK; 5115 } 5116 #endif 5117 5118 /** 5119 * set_pte_range - Set a range of PTEs to point to pages in a folio. 5120 * @vmf: Fault description. 5121 * @folio: The folio that contains @page. 5122 * @page: The first page to create a PTE for. 5123 * @nr: The number of PTEs to create.
5124 * @addr: The first address to create a PTE for. 5125 */ 5126 void set_pte_range(struct vm_fault *vmf, struct folio *folio, 5127 struct page *page, unsigned int nr, unsigned long addr) 5128 { 5129 struct vm_area_struct *vma = vmf->vma; 5130 bool write = vmf->flags & FAULT_FLAG_WRITE; 5131 bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE); 5132 pte_t entry; 5133 5134 flush_icache_pages(vma, page, nr); 5135 entry = mk_pte(page, vma->vm_page_prot); 5136 5137 if (prefault && arch_wants_old_prefaulted_pte()) 5138 entry = pte_mkold(entry); 5139 else 5140 entry = pte_sw_mkyoung(entry); 5141 5142 if (write) 5143 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 5144 if (unlikely(vmf_orig_pte_uffd_wp(vmf))) 5145 entry = pte_mkuffd_wp(entry); 5146 /* copy-on-write page */ 5147 if (write && !(vma->vm_flags & VM_SHARED)) { 5148 VM_BUG_ON_FOLIO(nr != 1, folio); 5149 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); 5150 folio_add_lru_vma(folio, vma); 5151 } else { 5152 folio_add_file_rmap_ptes(folio, page, nr, vma); 5153 } 5154 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); 5155 5156 /* no need to invalidate: a not-present page won't be cached */ 5157 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); 5158 } 5159 5160 static bool vmf_pte_changed(struct vm_fault *vmf) 5161 { 5162 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) 5163 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); 5164 5165 return !pte_none(ptep_get(vmf->pte)); 5166 } 5167 5168 /** 5169 * finish_fault - finish page fault once we have prepared the page to fault 5170 * 5171 * @vmf: structure describing the fault 5172 * 5173 * This function handles all that is needed to finish a page fault once the 5174 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for 5175 * given page, adds reverse page mapping, handles memcg charges and LRU 5176 * addition. 5177 * 5178 * The function expects the page to be locked and on success it consumes a 5179 * reference of a page being mapped (for the PTE which maps it). 5180 * 5181 * Return: %0 on success, %VM_FAULT_ code in case of error. 5182 */ 5183 vm_fault_t finish_fault(struct vm_fault *vmf) 5184 { 5185 struct vm_area_struct *vma = vmf->vma; 5186 struct page *page; 5187 struct folio *folio; 5188 vm_fault_t ret; 5189 bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && 5190 !(vma->vm_flags & VM_SHARED); 5191 int type, nr_pages; 5192 unsigned long addr; 5193 bool needs_fallback = false; 5194 5195 fallback: 5196 addr = vmf->address; 5197 5198 /* Did we COW the page? */ 5199 if (is_cow) 5200 page = vmf->cow_page; 5201 else 5202 page = vmf->page; 5203 5204 /* 5205 * check even for read faults because we might have lost our CoWed 5206 * page 5207 */ 5208 if (!(vma->vm_flags & VM_SHARED)) { 5209 ret = check_stable_address_space(vma->vm_mm); 5210 if (ret) 5211 return ret; 5212 } 5213 5214 if (pmd_none(*vmf->pmd)) { 5215 if (PageTransCompound(page)) { 5216 ret = do_set_pmd(vmf, page); 5217 if (ret != VM_FAULT_FALLBACK) 5218 return ret; 5219 } 5220 5221 if (vmf->prealloc_pte) 5222 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); 5223 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) 5224 return VM_FAULT_OOM; 5225 } 5226 5227 folio = page_folio(page); 5228 nr_pages = folio_nr_pages(folio); 5229 5230 /* 5231 * Using per-page fault to maintain the uffd semantics, and same 5232 * approach also applies to non-anonymous-shmem faults to avoid 5233 * inflating the RSS of the process. 
5234 */ 5235 if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) || 5236 unlikely(needs_fallback)) { 5237 nr_pages = 1; 5238 } else if (nr_pages > 1) { 5239 pgoff_t idx = folio_page_idx(folio, page); 5240 /* The page offset of vmf->address within the VMA. */ 5241 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; 5242 /* The index of the entry in the pagetable for fault page. */ 5243 pgoff_t pte_off = pte_index(vmf->address); 5244 5245 /* 5246 * Fallback to per-page fault in case the folio size in page 5247 * cache beyond the VMA limits and PMD pagetable limits. 5248 */ 5249 if (unlikely(vma_off < idx || 5250 vma_off + (nr_pages - idx) > vma_pages(vma) || 5251 pte_off < idx || 5252 pte_off + (nr_pages - idx) > PTRS_PER_PTE)) { 5253 nr_pages = 1; 5254 } else { 5255 /* Now we can set mappings for the whole large folio. */ 5256 addr = vmf->address - idx * PAGE_SIZE; 5257 page = &folio->page; 5258 } 5259 } 5260 5261 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 5262 addr, &vmf->ptl); 5263 if (!vmf->pte) 5264 return VM_FAULT_NOPAGE; 5265 5266 /* Re-check under ptl */ 5267 if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) { 5268 update_mmu_tlb(vma, addr, vmf->pte); 5269 ret = VM_FAULT_NOPAGE; 5270 goto unlock; 5271 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { 5272 needs_fallback = true; 5273 pte_unmap_unlock(vmf->pte, vmf->ptl); 5274 goto fallback; 5275 } 5276 5277 folio_ref_add(folio, nr_pages - 1); 5278 set_pte_range(vmf, folio, page, nr_pages, addr); 5279 type = is_cow ? MM_ANONPAGES : mm_counter_file(folio); 5280 add_mm_counter(vma->vm_mm, type, nr_pages); 5281 ret = 0; 5282 5283 unlock: 5284 pte_unmap_unlock(vmf->pte, vmf->ptl); 5285 return ret; 5286 } 5287 5288 static unsigned long fault_around_pages __read_mostly = 5289 65536 >> PAGE_SHIFT; 5290 5291 #ifdef CONFIG_DEBUG_FS 5292 static int fault_around_bytes_get(void *data, u64 *val) 5293 { 5294 *val = fault_around_pages << PAGE_SHIFT; 5295 return 0; 5296 } 5297 5298 /* 5299 * fault_around_bytes must be rounded down to the nearest page order as it's 5300 * what do_fault_around() expects to see. 5301 */ 5302 static int fault_around_bytes_set(void *data, u64 val) 5303 { 5304 if (val / PAGE_SIZE > PTRS_PER_PTE) 5305 return -EINVAL; 5306 5307 /* 5308 * The minimum value is 1 page, however this results in no fault-around 5309 * at all. See should_fault_around(). 5310 */ 5311 val = max(val, PAGE_SIZE); 5312 fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT; 5313 5314 return 0; 5315 } 5316 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, 5317 fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); 5318 5319 static int __init fault_around_debugfs(void) 5320 { 5321 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, 5322 &fault_around_bytes_fops); 5323 return 0; 5324 } 5325 late_initcall(fault_around_debugfs); 5326 #endif 5327 5328 /* 5329 * do_fault_around() tries to map few pages around the fault address. The hope 5330 * is that the pages will be needed soon and this will lower the number of 5331 * faults to handle. 5332 * 5333 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's 5334 * not ready to be mapped: not up-to-date, locked, etc. 5335 * 5336 * This function doesn't cross VMA or page table boundaries, in order to call 5337 * map_pages() and acquire a PTE lock only once. 5338 * 5339 * fault_around_pages defines how many pages we'll try to map. 
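* (With a 4K PAGE_SIZE the 65536-byte default corresponds to 16 pages.)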
5340 * do_fault_around() expects it to be set to a power of two less than or equal 5341 * to PTRS_PER_PTE. 5342 * 5343 * The virtual address of the area that we map is naturally aligned to 5344 * fault_around_pages * PAGE_SIZE rounded down to the machine page size 5345 * (and therefore to page order). This way it's easier to guarantee 5346 * that we don't cross page table boundaries. 5347 */ 5348 static vm_fault_t do_fault_around(struct vm_fault *vmf) 5349 { 5350 pgoff_t nr_pages = READ_ONCE(fault_around_pages); 5351 pgoff_t pte_off = pte_index(vmf->address); 5352 /* The page offset of vmf->address within the VMA. */ 5353 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; 5354 pgoff_t from_pte, to_pte; 5355 vm_fault_t ret; 5356 5357 /* The PTE offset of the start address, clamped to the VMA. */ 5358 from_pte = max(ALIGN_DOWN(pte_off, nr_pages), 5359 pte_off - min(pte_off, vma_off)); 5360 5361 /* The PTE offset of the end address, clamped to the VMA and PTE. */ 5362 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE, 5363 pte_off + vma_pages(vmf->vma) - vma_off) - 1; 5364 5365 if (pmd_none(*vmf->pmd)) { 5366 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); 5367 if (!vmf->prealloc_pte) 5368 return VM_FAULT_OOM; 5369 } 5370 5371 rcu_read_lock(); 5372 ret = vmf->vma->vm_ops->map_pages(vmf, 5373 vmf->pgoff + from_pte - pte_off, 5374 vmf->pgoff + to_pte - pte_off); 5375 rcu_read_unlock(); 5376 5377 return ret; 5378 } 5379 5380 /* Return true if we should do read fault-around, false otherwise */ 5381 static inline bool should_fault_around(struct vm_fault *vmf) 5382 { 5383 /* No ->map_pages? No way to fault around... */ 5384 if (!vmf->vma->vm_ops->map_pages) 5385 return false; 5386 5387 if (uffd_disable_fault_around(vmf->vma)) 5388 return false; 5389 5390 /* A single page implies no faulting 'around' at all. */ 5391 return fault_around_pages > 1; 5392 } 5393 5394 static vm_fault_t do_read_fault(struct vm_fault *vmf) 5395 { 5396 vm_fault_t ret = 0; 5397 struct folio *folio; 5398 5399 /* 5400 * Let's call ->map_pages() first and use ->fault() as fallback 5401 * if page by the offset is not ready to be mapped (cold cache or 5402 * something). 
5403 */ 5404 if (should_fault_around(vmf)) { 5405 ret = do_fault_around(vmf); 5406 if (ret) 5407 return ret; 5408 } 5409 5410 ret = vmf_can_call_fault(vmf); 5411 if (ret) 5412 return ret; 5413 5414 ret = __do_fault(vmf); 5415 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5416 return ret; 5417 5418 ret |= finish_fault(vmf); 5419 folio = page_folio(vmf->page); 5420 folio_unlock(folio); 5421 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5422 folio_put(folio); 5423 return ret; 5424 } 5425 5426 static vm_fault_t do_cow_fault(struct vm_fault *vmf) 5427 { 5428 struct vm_area_struct *vma = vmf->vma; 5429 struct folio *folio; 5430 vm_fault_t ret; 5431 5432 ret = vmf_can_call_fault(vmf); 5433 if (!ret) 5434 ret = vmf_anon_prepare(vmf); 5435 if (ret) 5436 return ret; 5437 5438 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); 5439 if (!folio) 5440 return VM_FAULT_OOM; 5441 5442 vmf->cow_page = &folio->page; 5443 5444 ret = __do_fault(vmf); 5445 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5446 goto uncharge_out; 5447 if (ret & VM_FAULT_DONE_COW) 5448 return ret; 5449 5450 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { 5451 ret = VM_FAULT_HWPOISON; 5452 goto unlock; 5453 } 5454 __folio_mark_uptodate(folio); 5455 5456 ret |= finish_fault(vmf); 5457 unlock: 5458 unlock_page(vmf->page); 5459 put_page(vmf->page); 5460 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5461 goto uncharge_out; 5462 return ret; 5463 uncharge_out: 5464 folio_put(folio); 5465 return ret; 5466 } 5467 5468 static vm_fault_t do_shared_fault(struct vm_fault *vmf) 5469 { 5470 struct vm_area_struct *vma = vmf->vma; 5471 vm_fault_t ret, tmp; 5472 struct folio *folio; 5473 5474 ret = vmf_can_call_fault(vmf); 5475 if (ret) 5476 return ret; 5477 5478 ret = __do_fault(vmf); 5479 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5480 return ret; 5481 5482 folio = page_folio(vmf->page); 5483 5484 /* 5485 * Check if the backing address space wants to know that the page is 5486 * about to become writable 5487 */ 5488 if (vma->vm_ops->page_mkwrite) { 5489 folio_unlock(folio); 5490 tmp = do_page_mkwrite(vmf, folio); 5491 if (unlikely(!tmp || 5492 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 5493 folio_put(folio); 5494 return tmp; 5495 } 5496 } 5497 5498 ret |= finish_fault(vmf); 5499 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | 5500 VM_FAULT_RETRY))) { 5501 folio_unlock(folio); 5502 folio_put(folio); 5503 return ret; 5504 } 5505 5506 ret |= fault_dirty_shared_page(vmf); 5507 return ret; 5508 } 5509 5510 /* 5511 * We enter with non-exclusive mmap_lock (to exclude vma changes, 5512 * but allow concurrent faults). 5513 * The mmap_lock may have been released depending on flags and our 5514 * return value. See filemap_fault() and __folio_lock_or_retry(). 5515 * If mmap_lock is released, vma may become invalid (for example 5516 * by other thread calling munmap()). 
5517 */ 5518 static vm_fault_t do_fault(struct vm_fault *vmf) 5519 { 5520 struct vm_area_struct *vma = vmf->vma; 5521 struct mm_struct *vm_mm = vma->vm_mm; 5522 vm_fault_t ret; 5523 5524 /* 5525 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND 5526 */ 5527 if (!vma->vm_ops->fault) { 5528 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 5529 vmf->address, &vmf->ptl); 5530 if (unlikely(!vmf->pte)) 5531 ret = VM_FAULT_SIGBUS; 5532 else { 5533 /* 5534 * Make sure this is not a temporary clearing of pte 5535 * by holding ptl and checking again. A R/M/W update 5536 * of pte involves: take ptl, clearing the pte so that 5537 * we don't have concurrent modification by hardware 5538 * followed by an update. 5539 */ 5540 if (unlikely(pte_none(ptep_get(vmf->pte)))) 5541 ret = VM_FAULT_SIGBUS; 5542 else 5543 ret = VM_FAULT_NOPAGE; 5544 5545 pte_unmap_unlock(vmf->pte, vmf->ptl); 5546 } 5547 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) 5548 ret = do_read_fault(vmf); 5549 else if (!(vma->vm_flags & VM_SHARED)) 5550 ret = do_cow_fault(vmf); 5551 else 5552 ret = do_shared_fault(vmf); 5553 5554 /* preallocated pagetable is unused: free it */ 5555 if (vmf->prealloc_pte) { 5556 pte_free(vm_mm, vmf->prealloc_pte); 5557 vmf->prealloc_pte = NULL; 5558 } 5559 return ret; 5560 } 5561 5562 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, 5563 unsigned long addr, int *flags, 5564 bool writable, int *last_cpupid) 5565 { 5566 struct vm_area_struct *vma = vmf->vma; 5567 5568 /* 5569 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as 5570 * much anyway since they can be in shared cache state. This misses 5571 * the case where a mapping is writable but the process never writes 5572 * to it but pte_write gets cleared during protection updates and 5573 * pte_dirty has unpredictable behaviour between PTE scan updates, 5574 * background writeback, dirty balancing and application behaviour. 5575 */ 5576 if (!writable) 5577 *flags |= TNF_NO_GROUP; 5578 5579 /* 5580 * Flag if the folio is shared between multiple address spaces. This 5581 * is later used when determining whether to group tasks together 5582 */ 5583 if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) 5584 *flags |= TNF_SHARED; 5585 /* 5586 * For memory tiering mode, cpupid of slow memory page is used 5587 * to record page access time. So use default value. 
	 */
	if (folio_use_access_time(folio))
		*last_cpupid = (-1 & LAST_CPUPID_MASK);
	else
		*last_cpupid = folio_last_cpupid(folio);

	/* Record the current PID accessing VMA */
	vma_set_access_pid_bit(vma);

	count_vm_numa_event(NUMA_HINT_FAULTS);
#ifdef CONFIG_NUMA_BALANCING
	count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
#endif
	if (folio_nid(folio) == numa_node_id()) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		*flags |= TNF_FAULT_LOCAL;
	}

	return mpol_misplaced(folio, vmf, addr);
}

static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
					unsigned long fault_addr, pte_t *fault_pte,
					bool writable)
{
	pte_t pte, old_pte;

	old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
	pte = pte_modify(old_pte, vma->vm_page_prot);
	pte = pte_mkyoung(pte);
	if (writable)
		pte = pte_mkwrite(pte, vma);
	ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
	update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
}

static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
				       struct folio *folio, pte_t fault_pte,
				       bool ignore_writable, bool pte_write_upgrade)
{
	int nr = pte_pfn(fault_pte) - folio_pfn(folio);
	unsigned long start, end, addr = vmf->address;
	unsigned long addr_start = addr - (nr << PAGE_SHIFT);
	unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
	pte_t *start_ptep;

	/* Stay within the VMA and within the page table. */
	start = max3(addr_start, pt_start, vma->vm_start);
	end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
		   vma->vm_end);
	start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);

	/* Restore all PTEs' mapping of the large folio */
	for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
		pte_t ptent = ptep_get(start_ptep);
		bool writable = false;

		if (!pte_present(ptent) || !pte_protnone(ptent))
			continue;

		if (pfn_folio(pte_pfn(ptent)) != folio)
			continue;

		if (!ignore_writable) {
			ptent = pte_modify(ptent, vma->vm_page_prot);
			writable = pte_write(ptent);
			if (!writable && pte_write_upgrade &&
			    can_change_pte_writable(vma, addr, ptent))
				writable = true;
		}

		numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
	}
}

static vm_fault_t do_numa_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = NULL;
	int nid = NUMA_NO_NODE;
	bool writable = false, ignore_writable = false;
	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
	int last_cpupid;
	int target_nid;
	pte_t pte, old_pte;
	int flags = 0, nr_pages;

	/*
	 * The pte cannot be used safely until we verify, while holding the page
	 * table lock, that its contents have not changed during fault handling.
	 */
	spin_lock(vmf->ptl);
	/* Read the live PTE from the page tables: */
	old_pte = ptep_get(vmf->pte);

	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}

	pte = pte_modify(old_pte, vma->vm_page_prot);

	/*
	 * Detect now whether the PTE could be writable; this information
	 * is only valid while holding the PT lock.
5693 */ 5694 writable = pte_write(pte); 5695 if (!writable && pte_write_upgrade && 5696 can_change_pte_writable(vma, vmf->address, pte)) 5697 writable = true; 5698 5699 folio = vm_normal_folio(vma, vmf->address, pte); 5700 if (!folio || folio_is_zone_device(folio)) 5701 goto out_map; 5702 5703 nid = folio_nid(folio); 5704 nr_pages = folio_nr_pages(folio); 5705 5706 target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags, 5707 writable, &last_cpupid); 5708 if (target_nid == NUMA_NO_NODE) 5709 goto out_map; 5710 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { 5711 flags |= TNF_MIGRATE_FAIL; 5712 goto out_map; 5713 } 5714 /* The folio is isolated and isolation code holds a folio reference. */ 5715 pte_unmap_unlock(vmf->pte, vmf->ptl); 5716 writable = false; 5717 ignore_writable = true; 5718 5719 /* Migrate to the requested node */ 5720 if (!migrate_misplaced_folio(folio, target_nid)) { 5721 nid = target_nid; 5722 flags |= TNF_MIGRATED; 5723 task_numa_fault(last_cpupid, nid, nr_pages, flags); 5724 return 0; 5725 } 5726 5727 flags |= TNF_MIGRATE_FAIL; 5728 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 5729 vmf->address, &vmf->ptl); 5730 if (unlikely(!vmf->pte)) 5731 return 0; 5732 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 5733 pte_unmap_unlock(vmf->pte, vmf->ptl); 5734 return 0; 5735 } 5736 out_map: 5737 /* 5738 * Make it present again, depending on how arch implements 5739 * non-accessible ptes, some can allow access by kernel mode. 5740 */ 5741 if (folio && folio_test_large(folio)) 5742 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, 5743 pte_write_upgrade); 5744 else 5745 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, 5746 writable); 5747 pte_unmap_unlock(vmf->pte, vmf->ptl); 5748 5749 if (nid != NUMA_NO_NODE) 5750 task_numa_fault(last_cpupid, nid, nr_pages, flags); 5751 return 0; 5752 } 5753 5754 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) 5755 { 5756 struct vm_area_struct *vma = vmf->vma; 5757 if (vma_is_anonymous(vma)) 5758 return do_huge_pmd_anonymous_page(vmf); 5759 if (vma->vm_ops->huge_fault) 5760 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); 5761 return VM_FAULT_FALLBACK; 5762 } 5763 5764 /* `inline' is required to avoid gcc 4.1.2 build error */ 5765 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) 5766 { 5767 struct vm_area_struct *vma = vmf->vma; 5768 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 5769 vm_fault_t ret; 5770 5771 if (vma_is_anonymous(vma)) { 5772 if (likely(!unshare) && 5773 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { 5774 if (userfaultfd_wp_async(vmf->vma)) 5775 goto split; 5776 return handle_userfault(vmf, VM_UFFD_WP); 5777 } 5778 return do_huge_pmd_wp_page(vmf); 5779 } 5780 5781 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 5782 if (vma->vm_ops->huge_fault) { 5783 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); 5784 if (!(ret & VM_FAULT_FALLBACK)) 5785 return ret; 5786 } 5787 } 5788 5789 split: 5790 /* COW or write-notify handled on pte level: split pmd. 
 */
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);

	return VM_FAULT_FALLBACK;
}

static vm_fault_t create_huge_pud(struct vm_fault *vmf)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	struct vm_area_struct *vma = vmf->vma;
	/* No support for anonymous transparent PUD pages yet */
	if (vma_is_anonymous(vma))
		return VM_FAULT_FALLBACK;
	if (vma->vm_ops->huge_fault)
		return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
	return VM_FAULT_FALLBACK;
}

static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret;

	/* No support for anonymous transparent PUD pages yet */
	if (vma_is_anonymous(vma))
		goto split;
	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
		if (vma->vm_ops->huge_fault) {
			ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
			if (!(ret & VM_FAULT_FALLBACK))
				return ret;
		}
	}
split:
	/* COW or write-notify not handled on PUD level: split pud. */
	__split_huge_pud(vma, vmf->pud, vmf->address);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
	return VM_FAULT_FALLBACK;
}

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures). The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
 * concurrent faults).
 *
 * The mmap_lock may have been released depending on flags and our return value.
 * See filemap_fault() and __folio_lock_or_retry().
 */
static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
{
	pte_t entry;

	if (unlikely(pmd_none(*vmf->pmd))) {
		/*
		 * Leave __pte_alloc() until later: because vm_ops->fault may
		 * want to allocate huge page, and if we expose page table
		 * for an instant, it will be difficult to retract from
		 * concurrent faults and from rmap lookups.
		 */
		vmf->pte = NULL;
		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
	} else {
		pmd_t dummy_pmdval;

		/*
		 * A regular pmd is established and it can't morph into a huge
		 * pmd by anon khugepaged, since that takes mmap_lock in write
		 * mode; but shmem or file collapse to THP could still morph
		 * it into a huge pmd: just retry later if so.
		 *
		 * Use the maywrite version to indicate that vmf->pte may be
		 * modified, but since we will use pte_same() to detect the
		 * change of the !pte_none() entry, there is no need to recheck
		 * the pmdval. Here we choose to pass a dummy variable instead
		 * of NULL, which helps a new user think about why this place is
		 * special.
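		 *
		 * As an illustration of that contract (this adds no extra
		 * logic; it is just the pattern the code below follows), the
		 * lockless mapping plus the later locked recheck amount to:
		 *
		 *	pte = pte_offset_map_rw_nolock(mm, pmd, addr,
		 *				       &pmdval, &ptl);
		 *	orig_pte = ptep_get_lockless(pte);
		 *	...
		 *	spin_lock(ptl);
		 *	if (!pte_same(ptep_get(pte), orig_pte))
		 *		goto out;	(raced with a concurrent update)
		 *
		 * and pmdval itself is never examined again, hence the dummy.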
5877 */ 5878 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd, 5879 vmf->address, &dummy_pmdval, 5880 &vmf->ptl); 5881 if (unlikely(!vmf->pte)) 5882 return 0; 5883 vmf->orig_pte = ptep_get_lockless(vmf->pte); 5884 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; 5885 5886 if (pte_none(vmf->orig_pte)) { 5887 pte_unmap(vmf->pte); 5888 vmf->pte = NULL; 5889 } 5890 } 5891 5892 if (!vmf->pte) 5893 return do_pte_missing(vmf); 5894 5895 if (!pte_present(vmf->orig_pte)) 5896 return do_swap_page(vmf); 5897 5898 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) 5899 return do_numa_page(vmf); 5900 5901 spin_lock(vmf->ptl); 5902 entry = vmf->orig_pte; 5903 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { 5904 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 5905 goto unlock; 5906 } 5907 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 5908 if (!pte_write(entry)) 5909 return do_wp_page(vmf); 5910 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) 5911 entry = pte_mkdirty(entry); 5912 } 5913 entry = pte_mkyoung(entry); 5914 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, 5915 vmf->flags & FAULT_FLAG_WRITE)) { 5916 update_mmu_cache_range(vmf, vmf->vma, vmf->address, 5917 vmf->pte, 1); 5918 } else { 5919 /* Skip spurious TLB flush for retried page fault */ 5920 if (vmf->flags & FAULT_FLAG_TRIED) 5921 goto unlock; 5922 /* 5923 * This is needed only for protection faults but the arch code 5924 * is not yet telling us if this is a protection fault or not. 5925 * This still avoids useless tlb flushes for .text page faults 5926 * with threads. 5927 */ 5928 if (vmf->flags & FAULT_FLAG_WRITE) 5929 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, 5930 vmf->pte); 5931 } 5932 unlock: 5933 pte_unmap_unlock(vmf->pte, vmf->ptl); 5934 return 0; 5935 } 5936 5937 /* 5938 * On entry, we hold either the VMA lock or the mmap_lock 5939 * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in 5940 * the result, the mmap_lock is not held on exit. See filemap_fault() 5941 * and __folio_lock_or_retry(). 5942 */ 5943 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, 5944 unsigned long address, unsigned int flags) 5945 { 5946 struct vm_fault vmf = { 5947 .vma = vma, 5948 .address = address & PAGE_MASK, 5949 .real_address = address, 5950 .flags = flags, 5951 .pgoff = linear_page_index(vma, address), 5952 .gfp_mask = __get_fault_gfp_mask(vma), 5953 }; 5954 struct mm_struct *mm = vma->vm_mm; 5955 unsigned long vm_flags = vma->vm_flags; 5956 pgd_t *pgd; 5957 p4d_t *p4d; 5958 vm_fault_t ret; 5959 5960 pgd = pgd_offset(mm, address); 5961 p4d = p4d_alloc(mm, pgd, address); 5962 if (!p4d) 5963 return VM_FAULT_OOM; 5964 5965 vmf.pud = pud_alloc(mm, p4d, address); 5966 if (!vmf.pud) 5967 return VM_FAULT_OOM; 5968 retry_pud: 5969 if (pud_none(*vmf.pud) && 5970 thp_vma_allowable_order(vma, vm_flags, 5971 TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) { 5972 ret = create_huge_pud(&vmf); 5973 if (!(ret & VM_FAULT_FALLBACK)) 5974 return ret; 5975 } else { 5976 pud_t orig_pud = *vmf.pud; 5977 5978 barrier(); 5979 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { 5980 5981 /* 5982 * TODO once we support anonymous PUDs: NUMA case and 5983 * FAULT_FLAG_UNSHARE handling. 
5984 */ 5985 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { 5986 ret = wp_huge_pud(&vmf, orig_pud); 5987 if (!(ret & VM_FAULT_FALLBACK)) 5988 return ret; 5989 } else { 5990 huge_pud_set_accessed(&vmf, orig_pud); 5991 return 0; 5992 } 5993 } 5994 } 5995 5996 vmf.pmd = pmd_alloc(mm, vmf.pud, address); 5997 if (!vmf.pmd) 5998 return VM_FAULT_OOM; 5999 6000 /* Huge pud page fault raced with pmd_alloc? */ 6001 if (pud_trans_unstable(vmf.pud)) 6002 goto retry_pud; 6003 6004 if (pmd_none(*vmf.pmd) && 6005 thp_vma_allowable_order(vma, vm_flags, 6006 TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) { 6007 ret = create_huge_pmd(&vmf); 6008 if (!(ret & VM_FAULT_FALLBACK)) 6009 return ret; 6010 } else { 6011 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); 6012 6013 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { 6014 VM_BUG_ON(thp_migration_supported() && 6015 !is_pmd_migration_entry(vmf.orig_pmd)); 6016 if (is_pmd_migration_entry(vmf.orig_pmd)) 6017 pmd_migration_entry_wait(mm, vmf.pmd); 6018 return 0; 6019 } 6020 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { 6021 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) 6022 return do_huge_pmd_numa_page(&vmf); 6023 6024 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 6025 !pmd_write(vmf.orig_pmd)) { 6026 ret = wp_huge_pmd(&vmf); 6027 if (!(ret & VM_FAULT_FALLBACK)) 6028 return ret; 6029 } else { 6030 huge_pmd_set_accessed(&vmf); 6031 return 0; 6032 } 6033 } 6034 } 6035 6036 return handle_pte_fault(&vmf); 6037 } 6038 6039 /** 6040 * mm_account_fault - Do page fault accounting 6041 * @mm: mm from which memcg should be extracted. It can be NULL. 6042 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting 6043 * of perf event counters, but we'll still do the per-task accounting to 6044 * the task who triggered this page fault. 6045 * @address: the faulted address. 6046 * @flags: the fault flags. 6047 * @ret: the fault retcode. 6048 * 6049 * This will take care of most of the page fault accounting. Meanwhile, it 6050 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter 6051 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should 6052 * still be in per-arch page fault handlers at the entry of page fault. 6053 */ 6054 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, 6055 unsigned long address, unsigned int flags, 6056 vm_fault_t ret) 6057 { 6058 bool major; 6059 6060 /* Incomplete faults will be accounted upon completion. */ 6061 if (ret & VM_FAULT_RETRY) 6062 return; 6063 6064 /* 6065 * To preserve the behavior of older kernels, PGFAULT counters record 6066 * both successful and failed faults, as opposed to perf counters, 6067 * which ignore failed cases. 6068 */ 6069 count_vm_event(PGFAULT); 6070 count_memcg_event_mm(mm, PGFAULT); 6071 6072 /* 6073 * Do not account for unsuccessful faults (e.g. when the address wasn't 6074 * valid). That includes arch_vma_access_permitted() failing before 6075 * reaching here. So this is not a "this many hardware page faults" 6076 * counter. We should use the hw profiling for that. 6077 */ 6078 if (ret & VM_FAULT_ERROR) 6079 return; 6080 6081 /* 6082 * We define the fault as a major fault when the final successful fault 6083 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't 6084 * handle it immediately previously). 
6085 */ 6086 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); 6087 6088 if (major) 6089 current->maj_flt++; 6090 else 6091 current->min_flt++; 6092 6093 /* 6094 * If the fault is done for GUP, regs will be NULL. We only do the 6095 * accounting for the per thread fault counters who triggered the 6096 * fault, and we skip the perf event updates. 6097 */ 6098 if (!regs) 6099 return; 6100 6101 if (major) 6102 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); 6103 else 6104 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 6105 } 6106 6107 #ifdef CONFIG_LRU_GEN 6108 static void lru_gen_enter_fault(struct vm_area_struct *vma) 6109 { 6110 /* the LRU algorithm only applies to accesses with recency */ 6111 current->in_lru_fault = vma_has_recency(vma); 6112 } 6113 6114 static void lru_gen_exit_fault(void) 6115 { 6116 current->in_lru_fault = false; 6117 } 6118 #else 6119 static void lru_gen_enter_fault(struct vm_area_struct *vma) 6120 { 6121 } 6122 6123 static void lru_gen_exit_fault(void) 6124 { 6125 } 6126 #endif /* CONFIG_LRU_GEN */ 6127 6128 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, 6129 unsigned int *flags) 6130 { 6131 if (unlikely(*flags & FAULT_FLAG_UNSHARE)) { 6132 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE)) 6133 return VM_FAULT_SIGSEGV; 6134 /* 6135 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's 6136 * just treat it like an ordinary read-fault otherwise. 6137 */ 6138 if (!is_cow_mapping(vma->vm_flags)) 6139 *flags &= ~FAULT_FLAG_UNSHARE; 6140 } else if (*flags & FAULT_FLAG_WRITE) { 6141 /* Write faults on read-only mappings are impossible ... */ 6142 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) 6143 return VM_FAULT_SIGSEGV; 6144 /* ... and FOLL_FORCE only applies to COW mappings. */ 6145 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && 6146 !is_cow_mapping(vma->vm_flags))) 6147 return VM_FAULT_SIGSEGV; 6148 } 6149 #ifdef CONFIG_PER_VMA_LOCK 6150 /* 6151 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of 6152 * the assumption that lock is dropped on VM_FAULT_RETRY. 6153 */ 6154 if (WARN_ON_ONCE((*flags & 6155 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) == 6156 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT))) 6157 return VM_FAULT_SIGSEGV; 6158 #endif 6159 6160 return 0; 6161 } 6162 6163 /* 6164 * By the time we get here, we already hold either the VMA lock or the 6165 * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which). 6166 * 6167 * The mmap_lock may have been released depending on flags and our 6168 * return value. See filemap_fault() and __folio_lock_or_retry(). 6169 */ 6170 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 6171 unsigned int flags, struct pt_regs *regs) 6172 { 6173 /* If the fault handler drops the mmap_lock, vma may be freed */ 6174 struct mm_struct *mm = vma->vm_mm; 6175 vm_fault_t ret; 6176 bool is_droppable; 6177 6178 __set_current_state(TASK_RUNNING); 6179 6180 ret = sanitize_fault_flags(vma, &flags); 6181 if (ret) 6182 goto out; 6183 6184 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, 6185 flags & FAULT_FLAG_INSTRUCTION, 6186 flags & FAULT_FLAG_REMOTE)) { 6187 ret = VM_FAULT_SIGSEGV; 6188 goto out; 6189 } 6190 6191 is_droppable = !!(vma->vm_flags & VM_DROPPABLE); 6192 6193 /* 6194 * Enable the memcg OOM handling for faults triggered in user 6195 * space. Kernel faults are handled more gracefully. 
6196 */ 6197 if (flags & FAULT_FLAG_USER) 6198 mem_cgroup_enter_user_fault(); 6199 6200 lru_gen_enter_fault(vma); 6201 6202 if (unlikely(is_vm_hugetlb_page(vma))) 6203 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); 6204 else 6205 ret = __handle_mm_fault(vma, address, flags); 6206 6207 /* 6208 * Warning: It is no longer safe to dereference vma-> after this point, 6209 * because mmap_lock might have been dropped by __handle_mm_fault(), so 6210 * vma might be destroyed from underneath us. 6211 */ 6212 6213 lru_gen_exit_fault(); 6214 6215 /* If the mapping is droppable, then errors due to OOM aren't fatal. */ 6216 if (is_droppable) 6217 ret &= ~VM_FAULT_OOM; 6218 6219 if (flags & FAULT_FLAG_USER) { 6220 mem_cgroup_exit_user_fault(); 6221 /* 6222 * The task may have entered a memcg OOM situation but 6223 * if the allocation error was handled gracefully (no 6224 * VM_FAULT_OOM), there is no need to kill anything. 6225 * Just clean up the OOM state peacefully. 6226 */ 6227 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) 6228 mem_cgroup_oom_synchronize(false); 6229 } 6230 out: 6231 mm_account_fault(mm, regs, address, flags, ret); 6232 6233 return ret; 6234 } 6235 EXPORT_SYMBOL_GPL(handle_mm_fault); 6236 6237 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA 6238 #include <linux/extable.h> 6239 6240 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) 6241 { 6242 if (likely(mmap_read_trylock(mm))) 6243 return true; 6244 6245 if (regs && !user_mode(regs)) { 6246 unsigned long ip = exception_ip(regs); 6247 if (!search_exception_tables(ip)) 6248 return false; 6249 } 6250 6251 return !mmap_read_lock_killable(mm); 6252 } 6253 6254 static inline bool mmap_upgrade_trylock(struct mm_struct *mm) 6255 { 6256 /* 6257 * We don't have this operation yet. 6258 * 6259 * It should be easy enough to do: it's basically a 6260 * atomic_long_try_cmpxchg_acquire() 6261 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but 6262 * it also needs the proper lockdep magic etc. 6263 */ 6264 return false; 6265 } 6266 6267 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) 6268 { 6269 mmap_read_unlock(mm); 6270 if (regs && !user_mode(regs)) { 6271 unsigned long ip = exception_ip(regs); 6272 if (!search_exception_tables(ip)) 6273 return false; 6274 } 6275 return !mmap_write_lock_killable(mm); 6276 } 6277 6278 /* 6279 * Helper for page fault handling. 6280 * 6281 * This is kind of equivalent to "mmap_read_lock()" followed 6282 * by "find_extend_vma()", except it's a lot more careful about 6283 * the locking (and will drop the lock on failure). 6284 * 6285 * For example, if we have a kernel bug that causes a page 6286 * fault, we don't want to just use mmap_read_lock() to get 6287 * the mm lock, because that would deadlock if the bug were 6288 * to happen while we're holding the mm lock for writing. 6289 * 6290 * So this checks the exception tables on kernel faults in 6291 * order to only do this all for instructions that are actually 6292 * expected to fault. 6293 * 6294 * We can also actually take the mm lock for writing if we 6295 * need to extend the vma, which helps the VM layer a lot. 6296 */ 6297 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, 6298 unsigned long addr, struct pt_regs *regs) 6299 { 6300 struct vm_area_struct *vma; 6301 6302 if (!get_mmap_lock_carefully(mm, regs)) 6303 return NULL; 6304 6305 vma = find_vma(mm, addr); 6306 if (likely(vma && (vma->vm_start <= addr))) 6307 return vma; 6308 6309 /* 6310 * Well, dang. 
We might still be successful, but only 6311 * if we can extend a vma to do so. 6312 */ 6313 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { 6314 mmap_read_unlock(mm); 6315 return NULL; 6316 } 6317 6318 /* 6319 * We can try to upgrade the mmap lock atomically, 6320 * in which case we can continue to use the vma 6321 * we already looked up. 6322 * 6323 * Otherwise we'll have to drop the mmap lock and 6324 * re-take it, and also look up the vma again, 6325 * re-checking it. 6326 */ 6327 if (!mmap_upgrade_trylock(mm)) { 6328 if (!upgrade_mmap_lock_carefully(mm, regs)) 6329 return NULL; 6330 6331 vma = find_vma(mm, addr); 6332 if (!vma) 6333 goto fail; 6334 if (vma->vm_start <= addr) 6335 goto success; 6336 if (!(vma->vm_flags & VM_GROWSDOWN)) 6337 goto fail; 6338 } 6339 6340 if (expand_stack_locked(vma, addr)) 6341 goto fail; 6342 6343 success: 6344 mmap_write_downgrade(mm); 6345 return vma; 6346 6347 fail: 6348 mmap_write_unlock(mm); 6349 return NULL; 6350 } 6351 #endif 6352 6353 #ifdef CONFIG_PER_VMA_LOCK 6354 /* 6355 * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be 6356 * stable and not isolated. If the VMA is not found or is being modified the 6357 * function returns NULL. 6358 */ 6359 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, 6360 unsigned long address) 6361 { 6362 MA_STATE(mas, &mm->mm_mt, address, address); 6363 struct vm_area_struct *vma; 6364 6365 rcu_read_lock(); 6366 retry: 6367 vma = mas_walk(&mas); 6368 if (!vma) 6369 goto inval; 6370 6371 if (!vma_start_read(vma)) 6372 goto inval; 6373 6374 /* Check if the VMA got isolated after we found it */ 6375 if (vma->detached) { 6376 vma_end_read(vma); 6377 count_vm_vma_lock_event(VMA_LOCK_MISS); 6378 /* The area was replaced with another one */ 6379 goto retry; 6380 } 6381 /* 6382 * At this point, we have a stable reference to a VMA: The VMA is 6383 * locked and we know it hasn't already been isolated. 6384 * From here on, we can access the VMA without worrying about which 6385 * fields are accessible for RCU readers. 6386 */ 6387 6388 /* Check since vm_start/vm_end might change before we lock the VMA */ 6389 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 6390 goto inval_end_read; 6391 6392 rcu_read_unlock(); 6393 return vma; 6394 6395 inval_end_read: 6396 vma_end_read(vma); 6397 inval: 6398 rcu_read_unlock(); 6399 count_vm_vma_lock_event(VMA_LOCK_ABORT); 6400 return NULL; 6401 } 6402 #endif /* CONFIG_PER_VMA_LOCK */ 6403 6404 #ifndef __PAGETABLE_P4D_FOLDED 6405 /* 6406 * Allocate p4d page table. 6407 * We've already handled the fast-path in-line. 6408 */ 6409 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 6410 { 6411 p4d_t *new = p4d_alloc_one(mm, address); 6412 if (!new) 6413 return -ENOMEM; 6414 6415 spin_lock(&mm->page_table_lock); 6416 if (pgd_present(*pgd)) { /* Another has populated it */ 6417 p4d_free(mm, new); 6418 } else { 6419 smp_wmb(); /* See comment in pmd_install() */ 6420 pgd_populate(mm, pgd, new); 6421 } 6422 spin_unlock(&mm->page_table_lock); 6423 return 0; 6424 } 6425 #endif /* __PAGETABLE_P4D_FOLDED */ 6426 6427 #ifndef __PAGETABLE_PUD_FOLDED 6428 /* 6429 * Allocate page upper directory. 6430 * We've already handled the fast-path in-line. 
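 *
 * The fast path referred to above lives in the inline pud_alloc() wrapper in
 * <linux/mm.h>; roughly (see that header for the authoritative version) it
 * only drops into this slow path when the p4d entry is still empty:
 *
 *	static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
 *				       unsigned long address)
 *	{
 *		return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
 *			NULL : pud_offset(p4d, address);
 *	}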
 */
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!p4d_present(*p4d)) {
		mm_inc_nr_puds(mm);
		smp_wmb(); /* See comment in pmd_install() */
		p4d_populate(mm, p4d, new);
	} else	/* Another has populated it */
		pud_free(mm, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	spinlock_t *ptl;
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	ptl = pud_lock(mm, pud);
	if (!pud_present(*pud)) {
		mm_inc_nr_pmds(mm);
		smp_wmb(); /* See comment in pmd_install() */
		pud_populate(mm, pud, new);
	} else {	/* Another has populated it */
		pmd_free(mm, new);
	}
	spin_unlock(ptl);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */

static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
				     spinlock_t *lock, pte_t *ptep,
				     pgprot_t pgprot, unsigned long pfn_base,
				     unsigned long addr_mask, bool writable,
				     bool special)
{
	args->lock = lock;
	args->ptep = ptep;
	args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
	args->pgprot = pgprot;
	args->writable = writable;
	args->special = special;
}

static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
{
#ifdef CONFIG_LOCKDEP
	struct file *file = vma->vm_file;
	struct address_space *mapping = file ? file->f_mapping : NULL;

	if (mapping)
		lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
			       lockdep_is_held(&vma->vm_mm->mmap_lock));
	else
		lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
#endif
}

/**
 * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
 * @args: Pointer to struct @follow_pfnmap_args
 *
 * The caller needs to set up args->vma and args->address to point to the
 * virtual address as the target of such a lookup. On a successful return,
 * the results will be put into other output fields.
 *
 * After the caller finished using the fields, the caller must invoke
 * follow_pfnmap_end() to properly release the locks and resources
 * of such a lookup request.
 *
 * During the start() and end() calls, the results in @args will be valid
 * as proper locks will be held. After the end() is called, all the fields
 * in @follow_pfnmap_args become invalid and must not be accessed. Further
 * use of such information after end() may require proper synchronizations
 * by the caller with page table updates, otherwise it can create a
 * security bug.
 *
 * If the PTE maps a refcounted page, callers are responsible to protect
 * against invalidation with MMU notifiers; otherwise access to the PFN at
 * a later point in time can trigger use-after-free.
 *
 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
 * should be taken for read, and the mmap semaphore cannot be released
 * before the end() is invoked.
 *
 * This function must not be used to modify PTE content.
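 *
 * A minimal usage sketch (hypothetical caller, error handling trimmed;
 * compare generic_access_phys() below for a complete user):
 *
 *	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
 *
 *	if (follow_pfnmap_start(&args))
 *		return -EINVAL;
 *	pfn = args.pfn;
 *	writable = args.writable;
 *	follow_pfnmap_end(&args);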
6531 * 6532 * Return: zero on success, negative otherwise. 6533 */ 6534 int follow_pfnmap_start(struct follow_pfnmap_args *args) 6535 { 6536 struct vm_area_struct *vma = args->vma; 6537 unsigned long address = args->address; 6538 struct mm_struct *mm = vma->vm_mm; 6539 spinlock_t *lock; 6540 pgd_t *pgdp; 6541 p4d_t *p4dp, p4d; 6542 pud_t *pudp, pud; 6543 pmd_t *pmdp, pmd; 6544 pte_t *ptep, pte; 6545 6546 pfnmap_lockdep_assert(vma); 6547 6548 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 6549 goto out; 6550 6551 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 6552 goto out; 6553 retry: 6554 pgdp = pgd_offset(mm, address); 6555 if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp))) 6556 goto out; 6557 6558 p4dp = p4d_offset(pgdp, address); 6559 p4d = READ_ONCE(*p4dp); 6560 if (p4d_none(p4d) || unlikely(p4d_bad(p4d))) 6561 goto out; 6562 6563 pudp = pud_offset(p4dp, address); 6564 pud = READ_ONCE(*pudp); 6565 if (pud_none(pud)) 6566 goto out; 6567 if (pud_leaf(pud)) { 6568 lock = pud_lock(mm, pudp); 6569 if (!unlikely(pud_leaf(pud))) { 6570 spin_unlock(lock); 6571 goto retry; 6572 } 6573 pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud), 6574 pud_pfn(pud), PUD_MASK, pud_write(pud), 6575 pud_special(pud)); 6576 return 0; 6577 } 6578 6579 pmdp = pmd_offset(pudp, address); 6580 pmd = pmdp_get_lockless(pmdp); 6581 if (pmd_leaf(pmd)) { 6582 lock = pmd_lock(mm, pmdp); 6583 if (!unlikely(pmd_leaf(pmd))) { 6584 spin_unlock(lock); 6585 goto retry; 6586 } 6587 pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd), 6588 pmd_pfn(pmd), PMD_MASK, pmd_write(pmd), 6589 pmd_special(pmd)); 6590 return 0; 6591 } 6592 6593 ptep = pte_offset_map_lock(mm, pmdp, address, &lock); 6594 if (!ptep) 6595 goto out; 6596 pte = ptep_get(ptep); 6597 if (!pte_present(pte)) 6598 goto unlock; 6599 pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte), 6600 pte_pfn(pte), PAGE_MASK, pte_write(pte), 6601 pte_special(pte)); 6602 return 0; 6603 unlock: 6604 pte_unmap_unlock(ptep, lock); 6605 out: 6606 return -EINVAL; 6607 } 6608 EXPORT_SYMBOL_GPL(follow_pfnmap_start); 6609 6610 /** 6611 * follow_pfnmap_end(): End a follow_pfnmap_start() process 6612 * @args: Pointer to struct @follow_pfnmap_args 6613 * 6614 * Must be used in pair of follow_pfnmap_start(). See the start() function 6615 * above for more information. 6616 */ 6617 void follow_pfnmap_end(struct follow_pfnmap_args *args) 6618 { 6619 if (args->lock) 6620 spin_unlock(args->lock); 6621 if (args->ptep) 6622 pte_unmap(args->ptep); 6623 } 6624 EXPORT_SYMBOL_GPL(follow_pfnmap_end); 6625 6626 #ifdef CONFIG_HAVE_IOREMAP_PROT 6627 /** 6628 * generic_access_phys - generic implementation for iomem mmap access 6629 * @vma: the vma to access 6630 * @addr: userspace address, not relative offset within @vma 6631 * @buf: buffer to read/write 6632 * @len: length of transfer 6633 * @write: set to FOLL_WRITE when writing, otherwise reading 6634 * 6635 * This is a generic implementation for &vm_operations_struct.access for an 6636 * iomem mapping. This callback is used by access_process_vm() when the @vma is 6637 * not page based. 
6638 */ 6639 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 6640 void *buf, int len, int write) 6641 { 6642 resource_size_t phys_addr; 6643 unsigned long prot = 0; 6644 void __iomem *maddr; 6645 int offset = offset_in_page(addr); 6646 int ret = -EINVAL; 6647 bool writable; 6648 struct follow_pfnmap_args args = { .vma = vma, .address = addr }; 6649 6650 retry: 6651 if (follow_pfnmap_start(&args)) 6652 return -EINVAL; 6653 prot = pgprot_val(args.pgprot); 6654 phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT; 6655 writable = args.writable; 6656 follow_pfnmap_end(&args); 6657 6658 if ((write & FOLL_WRITE) && !writable) 6659 return -EINVAL; 6660 6661 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 6662 if (!maddr) 6663 return -ENOMEM; 6664 6665 if (follow_pfnmap_start(&args)) 6666 goto out_unmap; 6667 6668 if ((prot != pgprot_val(args.pgprot)) || 6669 (phys_addr != (args.pfn << PAGE_SHIFT)) || 6670 (writable != args.writable)) { 6671 follow_pfnmap_end(&args); 6672 iounmap(maddr); 6673 goto retry; 6674 } 6675 6676 if (write) 6677 memcpy_toio(maddr + offset, buf, len); 6678 else 6679 memcpy_fromio(buf, maddr + offset, len); 6680 ret = len; 6681 follow_pfnmap_end(&args); 6682 out_unmap: 6683 iounmap(maddr); 6684 6685 return ret; 6686 } 6687 EXPORT_SYMBOL_GPL(generic_access_phys); 6688 #endif 6689 6690 /* 6691 * Access another process' address space as given in mm. 6692 */ 6693 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, 6694 void *buf, int len, unsigned int gup_flags) 6695 { 6696 void *old_buf = buf; 6697 int write = gup_flags & FOLL_WRITE; 6698 6699 if (mmap_read_lock_killable(mm)) 6700 return 0; 6701 6702 /* Untag the address before looking up the VMA */ 6703 addr = untagged_addr_remote(mm, addr); 6704 6705 /* Avoid triggering the temporary warning in __get_user_pages */ 6706 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) 6707 return 0; 6708 6709 /* ignore errors, just check how much was successfully transferred */ 6710 while (len) { 6711 int bytes, offset; 6712 void *maddr; 6713 struct vm_area_struct *vma = NULL; 6714 struct page *page = get_user_page_vma_remote(mm, addr, 6715 gup_flags, &vma); 6716 6717 if (IS_ERR(page)) { 6718 /* We might need to expand the stack to access it */ 6719 vma = vma_lookup(mm, addr); 6720 if (!vma) { 6721 vma = expand_stack(mm, addr); 6722 6723 /* mmap_lock was dropped on failure */ 6724 if (!vma) 6725 return buf - old_buf; 6726 6727 /* Try again if stack expansion worked */ 6728 continue; 6729 } 6730 6731 /* 6732 * Check if this is a VM_IO | VM_PFNMAP VMA, which 6733 * we can access using slightly different code. 
			 */
			bytes = 0;
#ifdef CONFIG_HAVE_IOREMAP_PROT
			if (vma->vm_ops && vma->vm_ops->access)
				bytes = vma->vm_ops->access(vma, addr, buf,
							    len, write);
#endif
			if (bytes <= 0)
				break;
		} else {
			bytes = len;
			offset = addr & (PAGE_SIZE-1);
			if (bytes > PAGE_SIZE-offset)
				bytes = PAGE_SIZE-offset;

			maddr = kmap_local_page(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			unmap_and_put_page(page, maddr);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	mmap_read_unlock(mm);

	return buf - old_buf;
}

/**
 * access_remote_vm - access another process' address space
 * @mm: the mm_struct of the target address space
 * @addr: start address to access
 * @buf: source or destination buffer
 * @len: number of bytes to transfer
 * @gup_flags: flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 *
 * Return: number of bytes copied from source to destination.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);

	mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * we might be running from an atomic context so we cannot sleep
	 */
	if (!mmap_read_trylock(mm))
		return;

	vma = vma_lookup(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		ip -= vma->vm_start;
		ip += vma->vm_pgoff << PAGE_SHIFT;
		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
				vma->vm_start,
				vma->vm_end - vma->vm_start);
	}
	mmap_read_unlock(mm);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	if (pagefault_disabled())
		return;
	__might_sleep(file, line);
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
}
EXPORT_SYMBOL(__might_fault);
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation. The target subpage will be processed last to keep its
 * cache lines hot.
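 *
 * For example, for an 8-page folio with the target at index 2, the helper
 * below touches the subpages in the order 7, 6, 5, 4, 0, 3, 1, 2: roughly
 * farthest from the target first, with the target subpage last.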
 */
static inline int process_huge_page(
	unsigned long addr_hint, unsigned int nr_pages,
	int (*process_subpage)(unsigned long addr, int idx, void *arg),
	void *arg)
{
	int i, n, base, l, ret;
	unsigned long addr = addr_hint &
		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);

	/* Process target subpage last to keep its cache lines hot */
	might_sleep();
	n = (addr_hint - addr) / PAGE_SIZE;
	if (2 * n <= nr_pages) {
		/* If target subpage in first half of huge page */
		base = 0;
		l = n;
		/* Process subpages at the end of huge page */
		for (i = nr_pages - 1; i >= 2 * n; i--) {
			cond_resched();
			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
			if (ret)
				return ret;
		}
	} else {
		/* If target subpage in second half of huge page */
		base = nr_pages - 2 * (nr_pages - n);
		l = nr_pages - n;
		/* Process subpages at the beginning of huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
			if (ret)
				return ret;
		}
	}
	/*
	 * Process remaining subpages in left-right-left-right pattern
	 * towards the target subpage
	 */
	for (i = 0; i < l; i++) {
		int left_idx = base + i;
		int right_idx = base + 2 * l - 1 - i;

		cond_resched();
		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
		if (ret)
			return ret;
		cond_resched();
		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
		if (ret)
			return ret;
	}
	return 0;
}

static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
				unsigned int nr_pages)
{
	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
	int i;

	might_sleep();
	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
	}
}

static int clear_subpage(unsigned long addr, int idx, void *arg)
{
	struct folio *folio = arg;

	clear_user_highpage(folio_page(folio, idx), addr);
	return 0;
}

/**
 * folio_zero_user - Zero a folio which will be mapped to userspace.
 * @folio: The folio to zero.
 * @addr_hint: The address that will be accessed, or the base address if unclear.
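 *
 * Illustrative usage (a sketch; the allocation of @folio is assumed and not
 * shown): a fault handler that has just allocated a large folio for a fault
 * at vmf->address would clear it with
 *
 *	folio_zero_user(folio, vmf->address);
 *
 * so that the subpage actually being touched is cleared last.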
6934 */ 6935 void folio_zero_user(struct folio *folio, unsigned long addr_hint) 6936 { 6937 unsigned int nr_pages = folio_nr_pages(folio); 6938 6939 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) 6940 clear_gigantic_page(folio, addr_hint, nr_pages); 6941 else 6942 process_huge_page(addr_hint, nr_pages, clear_subpage, folio); 6943 } 6944 6945 static int copy_user_gigantic_page(struct folio *dst, struct folio *src, 6946 unsigned long addr_hint, 6947 struct vm_area_struct *vma, 6948 unsigned int nr_pages) 6949 { 6950 unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst)); 6951 struct page *dst_page; 6952 struct page *src_page; 6953 int i; 6954 6955 for (i = 0; i < nr_pages; i++) { 6956 dst_page = folio_page(dst, i); 6957 src_page = folio_page(src, i); 6958 6959 cond_resched(); 6960 if (copy_mc_user_highpage(dst_page, src_page, 6961 addr + i*PAGE_SIZE, vma)) 6962 return -EHWPOISON; 6963 } 6964 return 0; 6965 } 6966 6967 struct copy_subpage_arg { 6968 struct folio *dst; 6969 struct folio *src; 6970 struct vm_area_struct *vma; 6971 }; 6972 6973 static int copy_subpage(unsigned long addr, int idx, void *arg) 6974 { 6975 struct copy_subpage_arg *copy_arg = arg; 6976 struct page *dst = folio_page(copy_arg->dst, idx); 6977 struct page *src = folio_page(copy_arg->src, idx); 6978 6979 if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma)) 6980 return -EHWPOISON; 6981 return 0; 6982 } 6983 6984 int copy_user_large_folio(struct folio *dst, struct folio *src, 6985 unsigned long addr_hint, struct vm_area_struct *vma) 6986 { 6987 unsigned int nr_pages = folio_nr_pages(dst); 6988 struct copy_subpage_arg arg = { 6989 .dst = dst, 6990 .src = src, 6991 .vma = vma, 6992 }; 6993 6994 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) 6995 return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages); 6996 6997 return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg); 6998 } 6999 7000 long copy_folio_from_user(struct folio *dst_folio, 7001 const void __user *usr_src, 7002 bool allow_pagefault) 7003 { 7004 void *kaddr; 7005 unsigned long i, rc = 0; 7006 unsigned int nr_pages = folio_nr_pages(dst_folio); 7007 unsigned long ret_val = nr_pages * PAGE_SIZE; 7008 struct page *subpage; 7009 7010 for (i = 0; i < nr_pages; i++) { 7011 subpage = folio_page(dst_folio, i); 7012 kaddr = kmap_local_page(subpage); 7013 if (!allow_pagefault) 7014 pagefault_disable(); 7015 rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE); 7016 if (!allow_pagefault) 7017 pagefault_enable(); 7018 kunmap_local(kaddr); 7019 7020 ret_val -= (PAGE_SIZE - rc); 7021 if (rc) 7022 break; 7023 7024 flush_dcache_page(subpage); 7025 7026 cond_resched(); 7027 } 7028 return ret_val; 7029 } 7030 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 7031 7032 #if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS 7033 7034 static struct kmem_cache *page_ptl_cachep; 7035 7036 void __init ptlock_cache_init(void) 7037 { 7038 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, 7039 SLAB_PANIC, NULL); 7040 } 7041 7042 bool ptlock_alloc(struct ptdesc *ptdesc) 7043 { 7044 spinlock_t *ptl; 7045 7046 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); 7047 if (!ptl) 7048 return false; 7049 ptdesc->ptl = ptl; 7050 return true; 7051 } 7052 7053 void ptlock_free(struct ptdesc *ptdesc) 7054 { 7055 if (ptdesc->ptl) 7056 kmem_cache_free(page_ptl_cachep, ptdesc->ptl); 7057 } 7058 #endif 7059 7060 void vma_pgtable_walk_begin(struct vm_area_struct *vma) 7061 { 7062 if (is_vm_hugetlb_page(vma)) 7063 
hugetlb_vma_lock_read(vma); 7064 } 7065 7066 void vma_pgtable_walk_end(struct vm_area_struct *vma) 7067 { 7068 if (is_vm_hugetlb_page(vma)) 7069 hugetlb_vma_unlock_read(vma); 7070 } 7071