// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 *            Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 *            Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *            (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/leafops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/shmem_fs.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <linux/pgalloc.h>
#include <linux/uaccess.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);

/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
	if (!userfaultfd_wp(vmf->vma))
		return false;
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_is_uffd_wp_marker(vmf->orig_pte);
}

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static const struct ctl_table mmu_sysctl_table[] = {
	{
		.procname	= "randomize_va_space",
		.data		= &randomize_va_space,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static int __init init_mm_sysctl(void)
{
	register_sysctl_init("kernel", mmu_sysctl_table);
	return 0;
}

subsys_initcall(init_mm_sysctl);

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long highest_memmap_pfn __read_mostly;

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
	trace_rss_stat(mm, member);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/**
 * free_pgd_range - Unmap and free page tables in the range
 * @tlb: the mmu_gather containing pending TLB flush info
 * @addr: virtual address start
 * @end: virtual address end
 * @floor: lowest address boundary
 * @ceiling: highest address boundary
 *
 * This function tears down all user-level page tables in the
 * specified virtual address range [@addr..@end). It is part of
 * the memory unmap flow.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE,
	 * (see pte_free_tlb()), flush the tlb if we need to.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

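/*
 * Illustrative example of the floor/ceiling convention used above (editorial
 * sketch, assuming a 2 MiB PMD_SIZE): unmapping [0x201000, 0x400000) with
 * floor == 0x201000 because a neighbouring vma ends there.  "addr &= PMD_MASK"
 * yields 0x200000, which is below floor, so addr is bumped by PMD_SIZE and the
 * PTE table covering [0x200000, 0x400000) is preserved - the neighbour still
 * has pages mapped through it.  Ceiling plays the mirrored role at the top end.
 */
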
/**
 * free_pgtables() - Free a range of page tables
 * @tlb: The mmu gather
 * @unmap: The unmap_desc
 *
 * Note: pg_start and pg_end are provided to indicate the absolute range of the
 * page tables that should be removed. This can differ from the vma mappings on
 * some archs that may have mappings that need to be removed outside the vmas.
 * Note that the prev->vm_end and next->vm_start are often used.
 *
 * The vma_end differs from the pg_end when a dup_mmap() failed and the tree
 * contains data unrelated to the mm_struct being torn down.
 */
void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
	struct unlink_vma_file_batch vb;
	struct ma_state *mas = unmap->mas;
	struct vm_area_struct *vma = unmap->first;

	/*
	 * Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
	 * may be 0.  Underflow is expected in this case.  Otherwise the
	 * pagetable end is exclusive.  vma_end is exclusive.  The last vma
	 * address should never be larger than the pagetable end.
	 */
	WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);

	tlb_free_vmas(tlb);

	do {
		unsigned long addr = vma->vm_start;
		struct vm_area_struct *next;

		next = mas_find(mas, unmap->tree_end - 1);

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		if (unmap->mm_wr_locked)
			vma_start_write(vma);
		unlink_anon_vmas(vma);

		unlink_file_vma_batch_init(&vb);
		unlink_file_vma_batch_add(&vb, vma);

		/*
		 * Optimization: gather nearby vmas into one call down
		 */
		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
			vma = next;
			next = mas_find(mas, unmap->tree_end - 1);
			if (unmap->mm_wr_locked)
				vma_start_write(vma);
			unlink_anon_vmas(vma);
			unlink_file_vma_batch_add(&vb, vma);
		}
		unlink_file_vma_batch_final(&vb);

		free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start,
			       next ? next->vm_start : unmap->pg_end);
		vma = next;
	} while (vma);
}

void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		/*
		 * Ensure all pte setup (eg. pte page lock and page clearing) are
		 * visible before the pte is made visible to other CPUs by being
		 * put into page tables.
		 *
		 * The other side of the story is the pointer chasing in the page
		 * table walking code (when walking the page table without locking;
		 * ie. most of the time). Fortunately, these data accesses consist
		 * of a chain of data-dependent loads, meaning most CPUs (alpha
		 * being the notable exception) will already guarantee loads are
		 * seen in-order. See the alpha page table accessors for the
		 * smp_rmb() barriers in page table walking code.
		 */
		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
		pmd_populate(mm, pmd, *pte);
		*pte = NULL;
	}
	spin_unlock(ptl);
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
}

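/*
 * Example of the race __pte_alloc() tolerates (editorial note): two faulting
 * threads may both allocate a PTE page for the same empty PMD.  pmd_install()
 * publishes only one of them, thanks to the pmd_none() check under the PMD
 * lock; the loser's "new" pointer is left non-NULL and its surplus page is
 * released via pte_free() above.
 */
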
int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		smp_wmb(); /* See comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

static bool is_bad_page_map_ratelimited(void)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return true;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;
	return false;
}

static void __print_bad_page_map_pgtable(struct mm_struct *mm, unsigned long addr)
{
	unsigned long long pgdv, p4dv, pudv, pmdv;
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pgd_t *pgdp;

	/*
	 * Although this looks like a fully lockless pgtable walk, it is not:
	 * see locking requirements for print_bad_page_map().
	 */
	pgdp = pgd_offset(mm, addr);
	pgdv = pgd_val(*pgdp);

	if (!pgd_present(*pgdp) || pgd_leaf(*pgdp)) {
		pr_alert("pgd:%08llx\n", pgdv);
		return;
	}

	p4dp = p4d_offset(pgdp, addr);
	p4d = p4dp_get(p4dp);
	p4dv = p4d_val(p4d);

	if (!p4d_present(p4d) || p4d_leaf(p4d)) {
		pr_alert("pgd:%08llx p4d:%08llx\n", pgdv, p4dv);
		return;
	}

	pudp = pud_offset(p4dp, addr);
	pud = pudp_get(pudp);
	pudv = pud_val(pud);

	if (!pud_present(pud) || pud_leaf(pud)) {
		pr_alert("pgd:%08llx p4d:%08llx pud:%08llx\n", pgdv, p4dv, pudv);
		return;
	}

	pmdp = pmd_offset(pudp, addr);
	pmd = pmdp_get(pmdp);
	pmdv = pmd_val(pmd);

	/*
	 * Dumping the PTE would be nice, but it's tricky with CONFIG_HIGHPTE,
	 * because the table should already be mapped by the caller and
	 * doing another map would be bad. print_bad_page_map() should
	 * already take care of printing the PTE.
	 */
	pr_alert("pgd:%08llx p4d:%08llx pud:%08llx pmd:%08llx\n", pgdv,
		 p4dv, pudv, pmdv);
}

/*
 * This function is called to print an error when a bad page table entry (e.g.,
 * corrupted page table entry) is found. For example, we might have a
 * PFN-mapped pte in a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 *
 * This function must be called during a proper page table walk, as it will
 * re-walk the page table to dump information: the caller MUST prevent page
 * table teardown (by holding mmap, vma or rmap lock) and MUST hold the leaf
 * page table lock.
 */
static void print_bad_page_map(struct vm_area_struct *vma,
		unsigned long addr, unsigned long long entry, struct page *page,
		enum pgtable_level level)
{
	struct address_space *mapping;
	pgoff_t index;

	if (is_bad_page_map_ratelimited())
		return;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s %s:%08llx", current->comm,
		 pgtable_level_to_str(level), entry);
	__print_bad_page_map_pgtable(vma->vm_mm, addr);
	if (page)
		dump_page(page, "bad page map");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps mmap_prepare: %ps read_folio:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap_prepare : NULL,
		 mapping ? mapping->a_ops->read_folio : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#define print_bad_pte(vma, addr, pte, page) \
	print_bad_page_map(vma, addr, pte_val(pte), page, PGTABLE_LEVEL_PTE)

/**
 * __vm_normal_page() - Get the "struct page" associated with a page table entry.
 * @vma: The VMA mapping the page table entry.
 * @addr: The address where the page table entry is mapped.
 * @pfn: The PFN stored in the page table entry.
 * @special: Whether the page table entry is marked "special".
 * @level: The page table level for error reporting purposes only.
 * @entry: The page table entry value for error reporting purposes only.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page and
 * are ordinarily refcounted.
 *
 * Page mappings of the shared zero folios are always considered "special", as
 * they are not ordinarily refcounted: neither the refcount nor the mapcount
 * of these folios is adjusted when mapping them into user page tables.
 * Selected page table walkers (such as GUP) can still identify mappings of the
 * shared zero folios and work with the underlying "struct page".
 *
 * There are 2 broad cases. Firstly, an architecture may define a "special"
 * page table entry bit, such as pte_special(), in which case this function is
 * trivial. Secondly, an architecture may not have a spare page table
 * entry bit, which requires a more complicated scheme, described below.
 *
 * With CONFIG_FIND_NORMAL_PAGE, we might have the "special" bit set on
 * page table entries that actually map "normal" pages: however, that page
 * cannot be looked up through the PFN stored in the page table entry, but
 * instead will be looked up through vm_ops->find_normal_page(). So far, this
 * only applies to PTEs.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true, except the shared zero
 * folios) are refcounted and considered normal pages by the VM.
 *
 * The disadvantage is that pages are refcounted (which can be slower and
 * simply not an option for some PFNMAP users). The advantage is that we
 * don't have to follow the strict linearity rule of PFNMAP mappings in
 * order to support COWable mappings.
 *
 * Return: Returns the "struct page" if this is a "normal" mapping. Returns
 *	   NULL if this is a "special" mapping.
 */
static inline struct page *__vm_normal_page(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, bool special,
		unsigned long long entry, enum pgtable_level level)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (unlikely(special)) {
#ifdef CONFIG_FIND_NORMAL_PAGE
			if (vma->vm_ops && vma->vm_ops->find_normal_page)
				return vma->vm_ops->find_normal_page(vma, addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
			if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
				return NULL;
			if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
				return NULL;

			print_bad_page_map(vma, addr, entry, NULL, level);
			return NULL;
		}
		/*
		 * With CONFIG_ARCH_HAS_PTE_SPECIAL, any special page table
		 * mappings (incl. shared zero folios) are marked accordingly.
		 */
	} else {
		if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) {
			if (vma->vm_flags & VM_MIXEDMAP) {
				/* If it has a "struct page", it's "normal". */
				if (!pfn_valid(pfn))
					return NULL;
			} else {
				unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

				/* Only CoW'ed anon folios are "normal". */
				if (pfn == vma->vm_pgoff + off)
					return NULL;
				if (!is_cow_mapping(vma->vm_flags))
					return NULL;
			}
		}

		if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
			return NULL;
	}

	if (unlikely(pfn > highest_memmap_pfn)) {
		/* Corrupted page table entry. */
		print_bad_page_map(vma, addr, entry, NULL, level);
		return NULL;
	}
	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * For example, VDSO mappings can cause them to exist.
	 */
	VM_WARN_ON_ONCE(is_zero_pfn(pfn) || is_huge_zero_pfn(pfn));
	return pfn_to_page(pfn);
}

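/*
 * Worked example of the remap_pfn_range() linearity rule above (editorial
 * sketch, assuming 4K pages): a driver remaps PFN 0x1000 at vma->vm_start, so
 * vma->vm_pgoff == 0x1000.  For addr == vma->vm_start + 2 * PAGE_SIZE a
 * "special" mapping must hold PFN 0x1002 == vm_pgoff + off.  If a COW fault
 * replaced that page with an anonymous copy, its PFN no longer satisfies the
 * equality, which is how __vm_normal_page() recognizes it as "normal".
 */
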
/**
 * vm_normal_page() - Get the "struct page" associated with a PTE
 * @vma: The VMA mapping the @pte.
 * @addr: The address where the @pte is mapped.
 * @pte: The PTE.
 *
 * Get the "struct page" associated with a PTE. See __vm_normal_page()
 * for details on "normal" and "special" mappings.
 *
 * Return: Returns the "struct page" if this is a "normal" mapping. Returns
 *	   NULL if this is a "special" mapping.
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	return __vm_normal_page(vma, addr, pte_pfn(pte), pte_special(pte),
				pte_val(pte), PGTABLE_LEVEL_PTE);
}

/**
 * vm_normal_folio() - Get the "struct folio" associated with a PTE
 * @vma: The VMA mapping the @pte.
 * @addr: The address where the @pte is mapped.
 * @pte: The PTE.
 *
 * Get the "struct folio" associated with a PTE. See __vm_normal_page()
 * for details on "normal" and "special" mappings.
 *
 * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
 *	   NULL if this is a "special" mapping.
 */
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	if (page)
		return page_folio(page);
	return NULL;
}

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
/**
 * vm_normal_page_pmd() - Get the "struct page" associated with a PMD
 * @vma: The VMA mapping the @pmd.
 * @addr: The address where the @pmd is mapped.
 * @pmd: The PMD.
 *
 * Get the "struct page" associated with a PMD. See __vm_normal_page()
 * for details on "normal" and "special" mappings.
 *
 * Return: Returns the "struct page" if this is a "normal" mapping. Returns
 *	   NULL if this is a "special" mapping.
 */
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	return __vm_normal_page(vma, addr, pmd_pfn(pmd), pmd_special(pmd),
				pmd_val(pmd), PGTABLE_LEVEL_PMD);
}

/**
 * vm_normal_folio_pmd() - Get the "struct folio" associated with a PMD
 * @vma: The VMA mapping the @pmd.
 * @addr: The address where the @pmd is mapped.
 * @pmd: The PMD.
 *
 * Get the "struct folio" associated with a PMD. See __vm_normal_page()
 * for details on "normal" and "special" mappings.
 *
 * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
 *	   NULL if this is a "special" mapping.
 */
struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
				  unsigned long addr, pmd_t pmd)
{
	struct page *page = vm_normal_page_pmd(vma, addr, pmd);

	if (page)
		return page_folio(page);
	return NULL;
}

/**
 * vm_normal_page_pud() - Get the "struct page" associated with a PUD
 * @vma: The VMA mapping the @pud.
 * @addr: The address where the @pud is mapped.
 * @pud: The PUD.
 *
 * Get the "struct page" associated with a PUD. See __vm_normal_page()
 * for details on "normal" and "special" mappings.
 *
 * Return: Returns the "struct page" if this is a "normal" mapping. Returns
 *	   NULL if this is a "special" mapping.
 */
struct page *vm_normal_page_pud(struct vm_area_struct *vma,
				unsigned long addr, pud_t pud)
{
	return __vm_normal_page(vma, addr, pud_pfn(pud), pud_special(pud),
				pud_val(pud), PGTABLE_LEVEL_PUD);
}
#endif

/**
 * restore_exclusive_pte - Restore a device-exclusive entry
 * @vma: VMA covering @address
 * @folio: the mapped folio
 * @page: the mapped folio page
 * @address: the virtual address
 * @ptep: pte pointer into the locked page table mapping the folio page
 * @orig_pte: pte value at @ptep
 *
 * Restore a device-exclusive non-swap entry to an ordinary present pte.
 *
 * The folio and the page table must be locked, and MMU notifiers must have
 * been called to invalidate any (exclusive) device mappings.
 *
 * Locking the folio makes sure that anybody who just converted the pte to
 * a device-exclusive entry can map it into the device to make forward
 * progress without others converting it back until the folio was unlocked.
 *
 * If the folio lock ever becomes an issue, we can stop relying on the folio
 * lock; it might make some scenarios with heavy thrashing less likely to
 * make forward progress, but these scenarios might not be valid use cases.
 *
 * Note that the folio lock does not protect against all cases of concurrent
 * page table modifications (e.g., MADV_DONTNEED, mprotect), so device drivers
 * must use MMU notifiers to sync against any concurrent changes.
 */
static void restore_exclusive_pte(struct vm_area_struct *vma,
		struct folio *folio, struct page *page, unsigned long address,
		pte_t *ptep, pte_t orig_pte)
{
	pte_t pte;

	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);

	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(orig_pte))
		pte = pte_mksoft_dirty(pte);

	if (pte_swp_uffd_wp(orig_pte))
		pte = pte_mkuffd_wp(pte);

	if ((vma->vm_flags & VM_WRITE) &&
	    can_change_pte_writable(vma, address, pte)) {
		if (folio_test_dirty(folio))
			pte = pte_mkdirty(pte);
		pte = pte_mkwrite(pte, vma);
	}
	set_pte_at(vma->vm_mm, address, ptep, pte);

	/*
	 * No need to invalidate - it was non-present before. However
	 * secondary CPUs may have mappings that need invalidating.
	 */
	update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int try_restore_exclusive_pte(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, pte_t orig_pte)
{
	const softleaf_t entry = softleaf_from_pte(orig_pte);
	struct page *page = softleaf_to_page(entry);
	struct folio *folio = page_folio(page);

	if (folio_trylock(folio)) {
		restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte);
		folio_unlock(folio);
		return 0;
	}

	return -EBUSY;
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	vm_flags_t vm_flags = dst_vma->vm_flags;
	pte_t orig_pte = ptep_get(src_pte);
	softleaf_t entry = softleaf_from_pte(orig_pte);
	pte_t pte = orig_pte;
	struct folio *folio;
	struct page *page;

	if (likely(softleaf_is_swap(entry))) {
		if (swap_dup_entry_direct(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		/* Mark the swap entry as shared. */
		if (pte_swp_exclusive(orig_pte)) {
			pte = pte_swp_clear_exclusive(orig_pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
		rss[MM_SWAPENTS]++;
	} else if (softleaf_is_migration(entry)) {
		folio = softleaf_to_folio(entry);

		rss[mm_counter(folio)]++;

		if (!softleaf_is_migration_read(entry) &&
		    is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both parent and child
			 * to be set to read. A previously exclusive entry is
			 * now shared.
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = softleaf_to_pte(entry);
			if (pte_swp_soft_dirty(orig_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (softleaf_is_device_private(entry)) {
		page = softleaf_to_page(entry);
		folio = page_folio(page);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		folio_get(folio);
		rss[mm_counter(folio)]++;
		/* Cannot fail as these pages cannot get pinned. */
		folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (softleaf_is_device_private_write(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (softleaf_is_device_exclusive(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
			return -EBUSY;
		return -ENOENT;
	} else if (softleaf_is_marker(entry)) {
		pte_marker marker = copy_pte_marker(entry, dst_vma);

		if (marker)
			set_pte_at(dst_mm, addr, dst_pte,
				   make_pte_marker(marker));
		return 0;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

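/*
 * Return-value contract of copy_nonpresent_pte(), as consumed by
 * copy_pte_range() below: 0 means the entry was copied (or intentionally
 * dropped); -EIO means swap_dup_entry_direct() failed and the caller must
 * allocate swap metadata and retry; -EBUSY means a device-exclusive folio
 * lock could not be taken without sleeping; -ENOENT means a device-exclusive
 * entry was restored to a present pte and should now be copied as such.
 */
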
/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct folio **prealloc, struct page *page)
{
	struct folio *new_folio;
	pte_t pte;

	new_folio = *prealloc;
	if (!new_folio)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good!  Take it
	 * over and copy the page & arm it.
	 */

	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
		return -EHWPOISON;

	*prealloc = NULL;
	__folio_mark_uptodate(new_folio);
	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
	folio_add_lru_vma(new_folio, dst_vma);
	rss[MM_ANONPAGES]++;

	/* All done, just insert the new page copy in the child */
	pte = folio_mk_pte(new_folio, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_mkuffd_wp(pte);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
		pte_t pte, unsigned long addr, int nr)
{
	struct mm_struct *src_mm = src_vma->vm_mm;

	/* If it's a COW mapping, write protect it in both processes. */
	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
		wrprotect_ptes(src_mm, addr, src_pte, nr);
		pte = pte_wrprotect(pte);
	}

	/* If it's a shared mapping, mark it clean in the child. */
	if (src_vma->vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
}

/*
 * Copy one present PTE, trying to batch-process subsequent PTEs that map
 * consecutive pages of the same folio by copying them as well.
 *
 * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
 * Otherwise, returns the number of copied PTEs (at least 1).
 */
static inline int
copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
		  int max_nr, int *rss, struct folio **prealloc)
{
	fpb_t flags = FPB_MERGE_WRITE;
	struct page *page;
	struct folio *folio;
	int err, nr;

	page = vm_normal_page(src_vma, addr, pte);
	if (unlikely(!page))
		goto copy_pte;

	folio = page_folio(page);

	/*
	 * If we likely have to copy, just don't bother with batching. Make
	 * sure that the common "small folio" case is as fast as possible
	 * by keeping the batching logic separate.
	 */
	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
		if (!(src_vma->vm_flags & VM_SHARED))
			flags |= FPB_RESPECT_DIRTY;
		if (vma_soft_dirty_enabled(src_vma))
			flags |= FPB_RESPECT_SOFT_DIRTY;

		nr = folio_pte_batch_flags(folio, src_vma, src_pte, &pte, max_nr, flags);
		folio_ref_add(folio, nr);
		if (folio_test_anon(folio)) {
			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
								  nr, dst_vma, src_vma))) {
				folio_ref_sub(folio, nr);
				return -EAGAIN;
			}
			rss[MM_ANONPAGES] += nr;
			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
		} else {
			folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
			rss[mm_counter_file(folio)] += nr;
		}
		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
				    addr, nr);
		return nr;
	}

	folio_get(folio);
	if (folio_test_anon(folio)) {
		/*
		 * If this page may have been pinned by the parent process,
		 * copy the page immediately for the child so that we'll always
		 * guarantee the pinned page won't be randomly replaced in the
		 * future.
		 */
		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) {
			/* Page may be pinned, we have to copy. */
			folio_put(folio);
			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
						addr, rss, prealloc, page);
			return err ? err : 1;
		}
		rss[MM_ANONPAGES]++;
		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
	} else {
		folio_dup_file_rmap_pte(folio, page, dst_vma);
		rss[mm_counter_file(folio)]++;
	}

copy_pte:
	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
	return 1;
}

static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
	struct folio *new_folio;

	if (need_zero)
		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
	else
		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);

	if (!new_folio)
		return NULL;

	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		return NULL;
	}
	folio_throttle_swaprate(new_folio, GFP_KERNEL);

	return new_folio;
}

static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	pmd_t dummy_pmdval;
	pte_t ptent;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, max_nr, ret = 0;
	int rss[NR_MM_COUNTERS];
	softleaf_t entry = softleaf_mk_none();
	struct folio *prealloc = NULL;
	int nr;

again:
	progress = 0;
	init_rss_vec(rss);

	/*
	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
	 * error handling here, assume that exclusive mmap_lock on dst and src
	 * protects anon from unexpected THP transitions; with shmem and file
	 * protected by mmap_lock-less collapse skipping areas with anon_vma
	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
	 * can remove such assumptions later, but this is good enough for now.
	 */
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We already hold the exclusive mmap_lock, the copy_pte_range() and
	 * retract_page_tables() are using vma->anon_vma to be exclusive, so
	 * the PTE page is stable, and there is no need to get pmdval and do
	 * pmd_same() check.
	 */
	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
					   &src_ptl);
	if (!src_pte) {
		pte_unmap_unlock(dst_pte, dst_ptl);
		/* ret == 0 */
		goto out;
	}
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	lazy_mmu_mode_enable();

	do {
		nr = 1;

		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		ptent = ptep_get(src_pte);
		if (pte_none(ptent)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(ptent))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
						  dst_pte, src_pte,
						  dst_vma, src_vma,
						  addr, rss);
			if (ret == -EIO) {
				entry = softleaf_from_pte(ptep_get(src_pte));
				break;
			} else if (ret == -EBUSY) {
				break;
			} else if (!ret) {
				progress += 8;
				continue;
			}
			ptent = ptep_get(src_pte);
			VM_WARN_ON_ONCE(!pte_present(ptent));

			/*
			 * Device exclusive entry restored, continue by copying
			 * the now present pte.
			 */
			WARN_ON_ONCE(ret != -ENOENT);
		}
		/* copy_present_ptes() will clear `*prealloc' if consumed */
		max_nr = (end - addr) / PAGE_SIZE;
		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
					ptent, addr, max_nr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 * If copy failed due to hwpoison in source page, break out.
		 */
		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
			break;
		if (unlikely(prealloc)) {
			/*
			 * pre-alloc page cannot be reused by next time so as
			 * to strictly follow mempolicy (e.g., alloc_page_vma()
			 * will allocate page according to address).  This
			 * could only happen if one pinned pte changed.
			 */
			folio_put(prealloc);
			prealloc = NULL;
		}
		nr = ret;
		progress += 8 * nr;
	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
		 addr != end);

	lazy_mmu_mode_disable();
	pte_unmap_unlock(orig_src_pte, src_ptl);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (swap_retry_table_alloc(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
		goto out;
	} else if (ret == -EAGAIN) {
		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
		if (!prealloc)
			return -ENOMEM;
	} else if (ret < 0) {
		VM_WARN_ON_ONCE(1);
	}

	/* We've captured and resolved the error. Reset, try again. */
	ret = 0;

	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		folio_put(prealloc);
	return ret;
}

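/*
 * Note on the retry protocol above: when copy_present_ptes() reports -EAGAIN,
 * both page table locks are dropped, a destination folio is preallocated with
 * folio_prealloc() (honouring the source vma's mempolicy for this address),
 * and the loop restarts at the same addr with "prealloc" filled in.
 */
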
static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_is_huge(*src_pmd)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
					    addr, dst_vma, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork(). Return
 * false when we can speed up fork() by allowing lazy page faults later until
 * when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	/*
	 * We check against dst_vma as while sane VMA flags will have been
	 * copied, VM_UFFD_WP may be set only on dst_vma.
	 */
	if (dst_vma->vm_flags & VM_COPY_ON_FORK)
		return true;
	/*
	 * The presence of an anon_vma indicates an anonymous VMA has page
	 * tables which naturally cannot be reconstituted on page fault.
	 */
	if (src_vma->anon_vma)
		return true;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.  Fork
	 * becomes much lighter when there are big shared or private readonly
	 * mappings. The tradeoff is that copy_page_range is more efficient
	 * than faulting.
	 */
	return false;
}

int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long next;
	bool is_cow;
	int ret;

	if (!vma_needs_copy(dst_vma, src_vma))
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * Disabling preemption is not needed for the write side, as
		 * the read side doesn't spin, but goes to the mmap_lock.
		 *
		 * Use the raw variant of the seqcount_t write API to avoid
		 * lockdep complaining about preemptibility.
		 */
		vma_assert_write_locked(src_vma);
		raw_write_seqcount_begin(&src_mm->write_protect_seq);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow) {
		raw_write_seqcount_end(&src_mm->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	}
	return ret;
}

/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
	/* By default, zap all pages */
	if (!details)
		return true;

	VM_WARN_ON_ONCE(details->skip_cows && details->reclaim_pt);

	/* Or, we zap COWed pages only if the caller wants to */
	return !details->skip_cows;
}

/* Decides whether we should zap this folio with the folio pointer specified */
static inline bool should_zap_folio(struct zap_details *details,
				    struct folio *folio)
{
	/* If we can make a decision without *folio.. */
	if (should_zap_cows(details))
		return true;

	/* Otherwise we should only zap non-anon folios */
	return !folio_test_anon(folio);
}

static inline bool zap_drop_markers(struct zap_details *details)
{
	if (!details)
		return false;

	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}

/*
 * This function makes sure that we'll replace the none pte with an uffd-wp
 * swap special pte marker when necessary. Must be called with the pgtable
 * lock held.
 *
 * Returns true if uffd-wp ptes were installed, false otherwise.
 */
static inline bool
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte, int nr,
			      struct zap_details *details, pte_t pteval)
{
	bool was_installed = false;

	if (!uffd_supports_wp_marker())
		return false;

	/* Zap on anonymous always means dropping everything */
	if (vma_is_anonymous(vma))
		return false;

	if (zap_drop_markers(details))
		return false;

	for (;;) {
		/* the PFN in the PTE is irrelevant. */
		if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
			was_installed = true;
		if (--nr == 0)
			break;
		pte++;
		addr += PAGE_SIZE;
	}

	return was_installed;
}

static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, struct folio *folio,
		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
		unsigned long addr, struct zap_details *details, int *rss,
		bool *force_flush, bool *force_break, bool *any_skipped)
{
	struct mm_struct *mm = tlb->mm;
	bool delay_rmap = false;

	if (!folio_test_anon(folio)) {
		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
		if (pte_dirty(ptent)) {
			folio_mark_dirty(folio);
			if (tlb_delay_rmap(tlb)) {
				delay_rmap = true;
				*force_flush = true;
			}
		}
		if (pte_young(ptent) && likely(vma_has_recency(vma)))
			folio_mark_accessed(folio);
		rss[mm_counter(folio)] -= nr;
	} else {
		/* We don't need up-to-date accessed/dirty bits. */
		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
		rss[MM_ANONPAGES] -= nr;
	}
	/* Checking a single PTE in a batch is sufficient. */
	arch_check_zapped_pte(vma, ptent);
	tlb_remove_tlb_entries(tlb, pte, nr, addr);
	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
							     nr, details, ptent);

	if (!delay_rmap) {
		folio_remove_rmap_ptes(folio, page, nr, vma);

		if (unlikely(folio_mapcount(folio) < 0))
			print_bad_pte(vma, addr, ptent, page);
	}
	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
		*force_flush = true;
		*force_break = true;
	}
}

/*
 * Zap or skip at least one present PTE, trying to batch-process subsequent
 * PTEs that map consecutive pages of the same folio.
 *
 * Returns the number of processed (skipped or zapped) PTEs (at least 1).
 */
static inline int zap_present_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
		unsigned int max_nr, unsigned long addr,
		struct zap_details *details, int *rss, bool *force_flush,
		bool *force_break, bool *any_skipped)
{
	struct mm_struct *mm = tlb->mm;
	struct folio *folio;
	struct page *page;
	int nr;

	page = vm_normal_page(vma, addr, ptent);
	if (!page) {
		/* We don't need up-to-date accessed/dirty bits. */
		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
		arch_check_zapped_pte(vma, ptent);
		tlb_remove_tlb_entry(tlb, pte, addr);
		if (userfaultfd_pte_wp(vma, ptent))
			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
						pte, 1, details, ptent);
		ksm_might_unmap_zero_page(mm, ptent);
		return 1;
	}

	folio = page_folio(page);
	if (unlikely(!should_zap_folio(details, folio))) {
		*any_skipped = true;
		return 1;
	}

	/*
	 * Make sure that the common "small folio" case is as fast as possible
	 * by keeping the batching logic separate.
	 */
	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
		nr = folio_pte_batch(folio, pte, ptent, max_nr);
		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
				       addr, details, rss, force_flush,
				       force_break, any_skipped);
		return nr;
	}
	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
			       details, rss, force_flush, force_break, any_skipped);
	return 1;
}

static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
		unsigned int max_nr, unsigned long addr,
		struct zap_details *details, int *rss, bool *any_skipped)
{
	softleaf_t entry;
	int nr = 1;

	*any_skipped = true;
	entry = softleaf_from_pte(ptent);
	if (softleaf_is_device_private(entry) ||
	    softleaf_is_device_exclusive(entry)) {
		struct page *page = softleaf_to_page(entry);
		struct folio *folio = page_folio(page);

		if (unlikely(!should_zap_folio(details, folio)))
			return 1;
		/*
		 * Both device private/exclusive mappings should only
		 * work with anonymous page so far, so we don't need to
		 * consider uffd-wp bit when zap. For more information,
		 * see zap_install_uffd_wp_if_needed().
		 */
		WARN_ON_ONCE(!vma_is_anonymous(vma));
		rss[mm_counter(folio)]--;
		folio_remove_rmap_pte(folio, page, vma);
		folio_put(folio);
	} else if (softleaf_is_swap(entry)) {
		/* Genuine swap entries, hence private anon pages */
		if (!should_zap_cows(details))
			return 1;

		nr = swap_pte_batch(pte, max_nr, ptent);
		rss[MM_SWAPENTS] -= nr;
		swap_put_entries_direct(entry, nr);
	} else if (softleaf_is_migration(entry)) {
		struct folio *folio = softleaf_to_folio(entry);

		if (!should_zap_folio(details, folio))
			return 1;
		rss[mm_counter(folio)]--;
	} else if (softleaf_is_uffd_wp_marker(entry)) {
		/*
		 * For anon: always drop the marker; for file: only
		 * drop the marker if explicitly requested.
		 */
		if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
			return 1;
	} else if (softleaf_is_guard_marker(entry)) {
		/*
		 * Ordinary zapping should not remove guard PTE
		 * markers. Only do so if we should remove PTE markers
		 * in general.
1772 */ 1773 if (!zap_drop_markers(details)) 1774 return 1; 1775 } else if (softleaf_is_hwpoison(entry) || 1776 softleaf_is_poison_marker(entry)) { 1777 if (!should_zap_cows(details)) 1778 return 1; 1779 } else { 1780 /* We should have covered all the swap entry types */ 1781 pr_alert("unrecognized swap entry 0x%lx\n", entry.val); 1782 WARN_ON_ONCE(1); 1783 } 1784 clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm); 1785 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); 1786 1787 return nr; 1788 } 1789 1790 static inline int do_zap_pte_range(struct mmu_gather *tlb, 1791 struct vm_area_struct *vma, pte_t *pte, 1792 unsigned long addr, unsigned long end, 1793 struct zap_details *details, int *rss, 1794 bool *force_flush, bool *force_break, 1795 bool *any_skipped) 1796 { 1797 pte_t ptent = ptep_get(pte); 1798 int max_nr = (end - addr) / PAGE_SIZE; 1799 int nr = 0; 1800 1801 /* Skip all consecutive none ptes */ 1802 if (pte_none(ptent)) { 1803 for (nr = 1; nr < max_nr; nr++) { 1804 ptent = ptep_get(pte + nr); 1805 if (!pte_none(ptent)) 1806 break; 1807 } 1808 max_nr -= nr; 1809 if (!max_nr) 1810 return nr; 1811 pte += nr; 1812 addr += nr * PAGE_SIZE; 1813 } 1814 1815 if (pte_present(ptent)) 1816 nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr, 1817 details, rss, force_flush, force_break, 1818 any_skipped); 1819 else 1820 nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr, 1821 details, rss, any_skipped); 1822 1823 return nr; 1824 } 1825 1826 static bool pte_table_reclaim_possible(unsigned long start, unsigned long end, 1827 struct zap_details *details) 1828 { 1829 if (!IS_ENABLED(CONFIG_PT_RECLAIM)) 1830 return false; 1831 /* Only zap if we are allowed to and cover the full page table. 
*/ 1832 return details && details->reclaim_pt && (end - start >= PMD_SIZE); 1833 } 1834 1835 static bool zap_empty_pte_table(struct mm_struct *mm, pmd_t *pmd, 1836 spinlock_t *ptl, pmd_t *pmdval) 1837 { 1838 spinlock_t *pml = pmd_lockptr(mm, pmd); 1839 1840 if (ptl != pml && !spin_trylock(pml)) 1841 return false; 1842 1843 *pmdval = pmdp_get(pmd); 1844 pmd_clear(pmd); 1845 if (ptl != pml) 1846 spin_unlock(pml); 1847 return true; 1848 } 1849 1850 static bool zap_pte_table_if_empty(struct mm_struct *mm, pmd_t *pmd, 1851 unsigned long addr, pmd_t *pmdval) 1852 { 1853 spinlock_t *pml, *ptl = NULL; 1854 pte_t *start_pte, *pte; 1855 int i; 1856 1857 pml = pmd_lock(mm, pmd); 1858 start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, pmdval, &ptl); 1859 if (!start_pte) 1860 goto out_ptl; 1861 if (ptl != pml) 1862 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); 1863 1864 for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) { 1865 if (!pte_none(ptep_get(pte))) 1866 goto out_ptl; 1867 } 1868 pte_unmap(start_pte); 1869 1870 pmd_clear(pmd); 1871 1872 if (ptl != pml) 1873 spin_unlock(ptl); 1874 spin_unlock(pml); 1875 return true; 1876 out_ptl: 1877 if (start_pte) 1878 pte_unmap_unlock(start_pte, ptl); 1879 if (ptl != pml) 1880 spin_unlock(pml); 1881 return false; 1882 } 1883 1884 static unsigned long zap_pte_range(struct mmu_gather *tlb, 1885 struct vm_area_struct *vma, pmd_t *pmd, 1886 unsigned long addr, unsigned long end, 1887 struct zap_details *details) 1888 { 1889 bool can_reclaim_pt = pte_table_reclaim_possible(addr, end, details); 1890 bool force_flush = false, force_break = false; 1891 struct mm_struct *mm = tlb->mm; 1892 int rss[NR_MM_COUNTERS]; 1893 spinlock_t *ptl; 1894 pte_t *start_pte; 1895 pte_t *pte; 1896 pmd_t pmdval; 1897 unsigned long start = addr; 1898 bool direct_reclaim = true; 1899 int nr; 1900 1901 retry: 1902 tlb_change_page_size(tlb, PAGE_SIZE); 1903 init_rss_vec(rss); 1904 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 1905 if (!pte) 1906 return addr; 1907 1908 flush_tlb_batched_pending(mm); 1909 lazy_mmu_mode_enable(); 1910 do { 1911 bool any_skipped = false; 1912 1913 if (need_resched()) { 1914 direct_reclaim = false; 1915 break; 1916 } 1917 1918 nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss, 1919 &force_flush, &force_break, &any_skipped); 1920 if (any_skipped) 1921 can_reclaim_pt = false; 1922 if (unlikely(force_break)) { 1923 addr += nr * PAGE_SIZE; 1924 direct_reclaim = false; 1925 break; 1926 } 1927 } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); 1928 1929 /* 1930 * Fast path: try to hold the pmd lock and unmap the PTE page. 1931 * 1932 * If the pte lock was released midway (retry case), or if the attempt 1933 * to hold the pmd lock failed, then we need to recheck all pte entries 1934 * to ensure they are still none, thereby preventing the pte entries 1935 * from being repopulated by another thread. 1936 */ 1937 if (can_reclaim_pt && direct_reclaim && addr == end) 1938 direct_reclaim = zap_empty_pte_table(mm, pmd, ptl, &pmdval); 1939 1940 add_mm_rss_vec(mm, rss); 1941 lazy_mmu_mode_disable(); 1942 1943 /* Do the actual TLB flush before dropping ptl */ 1944 if (force_flush) { 1945 tlb_flush_mmu_tlbonly(tlb); 1946 tlb_flush_rmaps(tlb, vma); 1947 } 1948 pte_unmap_unlock(start_pte, ptl); 1949 1950 /* 1951 * If we forced a TLB flush (either due to running out of 1952 * batch buffers or because we needed to flush dirty TLB 1953 * entries before releasing the ptl), free the batched 1954 * memory too. 
Come back again if we didn't do everything. 1955 */ 1956 if (force_flush) 1957 tlb_flush_mmu(tlb); 1958 1959 if (addr != end) { 1960 cond_resched(); 1961 force_flush = false; 1962 force_break = false; 1963 goto retry; 1964 } 1965 1966 if (can_reclaim_pt) { 1967 if (direct_reclaim || zap_pte_table_if_empty(mm, pmd, start, &pmdval)) { 1968 pte_free_tlb(tlb, pmd_pgtable(pmdval), addr); 1969 mm_dec_nr_ptes(mm); 1970 } 1971 } 1972 1973 return addr; 1974 } 1975 1976 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, 1977 struct vm_area_struct *vma, pud_t *pud, 1978 unsigned long addr, unsigned long end, 1979 struct zap_details *details) 1980 { 1981 pmd_t *pmd; 1982 unsigned long next; 1983 1984 pmd = pmd_offset(pud, addr); 1985 do { 1986 next = pmd_addr_end(addr, end); 1987 if (pmd_is_huge(*pmd)) { 1988 if (next - addr != HPAGE_PMD_SIZE) 1989 __split_huge_pmd(vma, pmd, addr, false); 1990 else if (zap_huge_pmd(tlb, vma, pmd, addr)) { 1991 addr = next; 1992 continue; 1993 } 1994 /* fall through */ 1995 } else if (details && details->single_folio && 1996 folio_test_pmd_mappable(details->single_folio) && 1997 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) { 1998 sync_with_folio_pmd_zap(tlb->mm, pmd); 1999 } 2000 if (pmd_none(*pmd)) { 2001 addr = next; 2002 continue; 2003 } 2004 addr = zap_pte_range(tlb, vma, pmd, addr, next, details); 2005 if (addr != next) 2006 pmd--; 2007 } while (pmd++, cond_resched(), addr != end); 2008 2009 return addr; 2010 } 2011 2012 static inline unsigned long zap_pud_range(struct mmu_gather *tlb, 2013 struct vm_area_struct *vma, p4d_t *p4d, 2014 unsigned long addr, unsigned long end, 2015 struct zap_details *details) 2016 { 2017 pud_t *pud; 2018 unsigned long next; 2019 2020 pud = pud_offset(p4d, addr); 2021 do { 2022 next = pud_addr_end(addr, end); 2023 if (pud_trans_huge(*pud)) { 2024 if (next - addr != HPAGE_PUD_SIZE) 2025 split_huge_pud(vma, pud, addr); 2026 else if (zap_huge_pud(tlb, vma, pud, addr)) 2027 goto next; 2028 /* fall through */ 2029 } 2030 if (pud_none_or_clear_bad(pud)) 2031 continue; 2032 next = zap_pmd_range(tlb, vma, pud, addr, next, details); 2033 next: 2034 cond_resched(); 2035 } while (pud++, addr = next, addr != end); 2036 2037 return addr; 2038 } 2039 2040 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, 2041 struct vm_area_struct *vma, pgd_t *pgd, 2042 unsigned long addr, unsigned long end, 2043 struct zap_details *details) 2044 { 2045 p4d_t *p4d; 2046 unsigned long next; 2047 2048 p4d = p4d_offset(pgd, addr); 2049 do { 2050 next = p4d_addr_end(addr, end); 2051 if (p4d_none_or_clear_bad(p4d)) 2052 continue; 2053 next = zap_pud_range(tlb, vma, p4d, addr, next, details); 2054 } while (p4d++, addr = next, addr != end); 2055 2056 return addr; 2057 } 2058 2059 static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 2060 unsigned long start, unsigned long end, 2061 struct zap_details *details) 2062 { 2063 const bool reaping = details && details->reaping; 2064 2065 VM_WARN_ON_ONCE(start >= end || !range_in_vma(vma, start, end)); 2066 2067 /* uprobe_munmap() might sleep, so skip it when reaping. */ 2068 if (vma->vm_file && !reaping) 2069 uprobe_munmap(vma, start, end); 2070 2071 if (unlikely(is_vm_hugetlb_page(vma))) { 2072 zap_flags_t zap_flags = details ? details->zap_flags : 0; 2073 2074 VM_WARN_ON_ONCE(reaping); 2075 /* 2076 * vm_file will be NULL when we fail early while instantiating 2077 * a new mapping. In this case, no pages were mapped yet and 2078 * there is nothing to do. 
2079 */ 2080 if (!vma->vm_file) 2081 return; 2082 __unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags); 2083 } else { 2084 unsigned long next, addr = start; 2085 pgd_t *pgd; 2086 2087 tlb_start_vma(tlb, vma); 2088 pgd = pgd_offset(vma->vm_mm, addr); 2089 do { 2090 next = pgd_addr_end(addr, end); 2091 if (pgd_none_or_clear_bad(pgd)) 2092 continue; 2093 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); 2094 } while (pgd++, addr = next, addr != end); 2095 tlb_end_vma(tlb, vma); 2096 } 2097 } 2098 2099 /** 2100 * zap_vma_for_reaping - zap all page table entries in the vma without blocking 2101 * @vma: The vma to zap. 2102 * 2103 * Zap all page table entries in the vma without blocking for use by the oom 2104 * killer. Hugetlb vmas are not supported. 2105 * 2106 * Returns: 0 on success, -EBUSY if we would have to block. 2107 */ 2108 int zap_vma_for_reaping(struct vm_area_struct *vma) 2109 { 2110 struct zap_details details = { 2111 .reaping = true, 2112 }; 2113 struct mmu_notifier_range range; 2114 struct mmu_gather tlb; 2115 2116 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 2117 vma->vm_start, vma->vm_end); 2118 tlb_gather_mmu(&tlb, vma->vm_mm); 2119 if (mmu_notifier_invalidate_range_start_nonblock(&range)) { 2120 tlb_finish_mmu(&tlb); 2121 return -EBUSY; 2122 } 2123 __zap_vma_range(&tlb, vma, range.start, range.end, &details); 2124 mmu_notifier_invalidate_range_end(&range); 2125 tlb_finish_mmu(&tlb); 2126 return 0; 2127 } 2128 2129 /** 2130 * unmap_vmas - unmap a range of memory covered by a list of vma's 2131 * @tlb: address of the caller's struct mmu_gather 2132 * @unmap: The unmap_desc 2133 * 2134 * Unmap all pages in the vma list. 2135 * 2136 * Only addresses between `start' and `end' will be unmapped. 2137 * 2138 * The VMA list must be sorted in ascending virtual address order. 2139 * 2140 * unmap_vmas() assumes that the caller will flush the whole unmapped address 2141 * range after unmap_vmas() returns. So the only responsibility here is to 2142 * ensure that any thus-far unmapped pages are flushed before unmap_vmas() 2143 * drops the lock and schedules. 2144 */ 2145 void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap) 2146 { 2147 struct vm_area_struct *vma; 2148 struct mmu_notifier_range range; 2149 struct zap_details details = { 2150 .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP, 2151 }; 2152 2153 vma = unmap->first; 2154 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, 2155 unmap->vma_start, unmap->vma_end); 2156 mmu_notifier_invalidate_range_start(&range); 2157 do { 2158 unsigned long start = max(vma->vm_start, unmap->vma_start); 2159 unsigned long end = min(vma->vm_end, unmap->vma_end); 2160 2161 hugetlb_zap_begin(vma, &start, &end); 2162 __zap_vma_range(tlb, vma, start, end, &details); 2163 hugetlb_zap_end(vma, &details); 2164 vma = mas_find(unmap->mas, unmap->tree_end - 1); 2165 } while (vma); 2166 mmu_notifier_invalidate_range_end(&range); 2167 } 2168 2169 /** 2170 * zap_vma_range_batched - zap page table entries in a vma range 2171 * @tlb: pointer to the caller's struct mmu_gather 2172 * @vma: the vma covering the range to zap 2173 * @address: starting address of the range to zap 2174 * @size: number of bytes to zap 2175 * @details: details specifying zapping behavior 2176 * 2177 * @tlb must not be NULL. The provided address range must be fully 2178 * contained within @vma. If @vma is for hugetlb, @tlb is flushed and 2179 * re-initialized by this function. 
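 *
 * A minimal caller sketch (illustrative; it mirrors what zap_vma_range()
 * below does):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, vma->vm_mm);
 *	zap_vma_range_batched(&tlb, vma, address, size, NULL);
 *	tlb_finish_mmu(&tlb);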
2180 * 2181 * If @details is NULL, this function will zap all page table entries. 2182 */ 2183 void zap_vma_range_batched(struct mmu_gather *tlb, 2184 struct vm_area_struct *vma, unsigned long address, 2185 unsigned long size, struct zap_details *details) 2186 { 2187 const unsigned long end = address + size; 2188 struct mmu_notifier_range range; 2189 2190 VM_WARN_ON_ONCE(!tlb || tlb->mm != vma->vm_mm); 2191 2192 if (unlikely(!size)) 2193 return; 2194 2195 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 2196 address, end); 2197 hugetlb_zap_begin(vma, &range.start, &range.end); 2198 update_hiwater_rss(vma->vm_mm); 2199 mmu_notifier_invalidate_range_start(&range); 2200 /* 2201 * unmap 'address-end' not 'range.start-range.end' as range 2202 * could have been expanded for hugetlb pmd sharing. 2203 */ 2204 __zap_vma_range(tlb, vma, address, end, details); 2205 mmu_notifier_invalidate_range_end(&range); 2206 if (is_vm_hugetlb_page(vma)) { 2207 /* 2208 * flush tlb and free resources before hugetlb_zap_end(), to 2209 * avoid concurrent page faults' allocation failure. 2210 */ 2211 tlb_finish_mmu(tlb); 2212 hugetlb_zap_end(vma, details); 2213 tlb_gather_mmu(tlb, vma->vm_mm); 2214 } 2215 } 2216 2217 /** 2218 * zap_vma_range - zap all page table entries in a vma range 2219 * @vma: the vma covering the range to zap 2220 * @address: starting address of the range to zap 2221 * @size: number of bytes to zap 2222 * 2223 * The provided address range must be fully contained within @vma. 2224 */ 2225 void zap_vma_range(struct vm_area_struct *vma, unsigned long address, 2226 unsigned long size) 2227 { 2228 struct mmu_gather tlb; 2229 2230 tlb_gather_mmu(&tlb, vma->vm_mm); 2231 zap_vma_range_batched(&tlb, vma, address, size, NULL); 2232 tlb_finish_mmu(&tlb); 2233 } 2234 2235 /** 2236 * zap_special_vma_range - zap all page table entries in a special vma range 2237 * @vma: the vma covering the range to zap 2238 * @address: starting address of the range to zap 2239 * @size: number of bytes to zap 2240 * 2241 * This function does nothing when the provided address range is not fully 2242 * contained in @vma, or when the @vma is not VM_PFNMAP or VM_MIXEDMAP. 2243 */ 2244 void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address, 2245 unsigned long size) 2246 { 2247 if (!range_in_vma(vma, address, address + size) || 2248 !(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) 2249 return; 2250 2251 zap_vma_range(vma, address, size); 2252 } 2253 EXPORT_SYMBOL_GPL(zap_special_vma_range); 2254 2255 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) 2256 { 2257 pgd_t *pgd; 2258 p4d_t *p4d; 2259 pud_t *pud; 2260 pmd_t *pmd; 2261 2262 pgd = pgd_offset(mm, addr); 2263 p4d = p4d_alloc(mm, pgd, addr); 2264 if (!p4d) 2265 return NULL; 2266 pud = pud_alloc(mm, p4d, addr); 2267 if (!pud) 2268 return NULL; 2269 pmd = pmd_alloc(mm, pud, addr); 2270 if (!pmd) 2271 return NULL; 2272 2273 VM_BUG_ON(pmd_trans_huge(*pmd)); 2274 return pmd; 2275 } 2276 2277 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, 2278 spinlock_t **ptl) 2279 { 2280 pmd_t *pmd = walk_to_pmd(mm, addr); 2281 2282 if (!pmd) 2283 return NULL; 2284 return pte_alloc_map_lock(mm, pmd, addr, ptl); 2285 } 2286 2287 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma) 2288 { 2289 VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP); 2290 /* 2291 * Whoever wants to forbid the zeropage after some zeropages 2292 * might already have been mapped has to scan the page tables and 2293 * bail out on any zeropages. 
Zeropages in COW mappings can 2294 * be unshared using FAULT_FLAG_UNSHARE faults. 2295 */ 2296 if (mm_forbids_zeropage(vma->vm_mm)) 2297 return false; 2298 /* zeropages in COW mappings are common and unproblematic. */ 2299 if (is_cow_mapping(vma->vm_flags)) 2300 return true; 2301 /* Mappings that do not allow for writable PTEs are unproblematic. */ 2302 if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) 2303 return true; 2304 /* 2305 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could 2306 * find the shared zeropage and longterm-pin it, which would 2307 * be problematic as soon as the zeropage gets replaced by a different 2308 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would 2309 * now differ from what GUP looked up. FSDAX is incompatible with 2310 * FOLL_LONGTERM and VM_IO is incompatible with GUP completely (see 2311 * check_vma_flags). 2312 */ 2313 return vma->vm_ops && vma->vm_ops->pfn_mkwrite && 2314 (vma_is_fsdax(vma) || vma->vm_flags & VM_IO); 2315 } 2316 2317 static int validate_page_before_insert(struct vm_area_struct *vma, 2318 struct page *page) 2319 { 2320 struct folio *folio = page_folio(page); 2321 2322 if (!folio_ref_count(folio)) 2323 return -EINVAL; 2324 if (unlikely(is_zero_folio(folio))) { 2325 if (!vm_mixed_zeropage_allowed(vma)) 2326 return -EINVAL; 2327 return 0; 2328 } 2329 if (folio_test_anon(folio) || page_has_type(page)) 2330 return -EINVAL; 2331 flush_dcache_folio(folio); 2332 return 0; 2333 } 2334 2335 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, 2336 unsigned long addr, struct page *page, 2337 pgprot_t prot, bool mkwrite) 2338 { 2339 struct folio *folio = page_folio(page); 2340 pte_t pteval = ptep_get(pte); 2341 2342 if (!pte_none(pteval)) { 2343 if (!mkwrite) 2344 return -EBUSY; 2345 2346 /* see insert_pfn(). */ 2347 if (pte_pfn(pteval) != page_to_pfn(page)) { 2348 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(pteval))); 2349 return -EFAULT; 2350 } 2351 pteval = maybe_mkwrite(pteval, vma); 2352 pteval = pte_mkyoung(pteval); 2353 if (ptep_set_access_flags(vma, addr, pte, pteval, 1)) 2354 update_mmu_cache(vma, addr, pte); 2355 return 0; 2356 } 2357 2358 /* Ok, finally just insert the thing..
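 * For the shared zeropage we install a special (non-refcounted) PTE;
 * otherwise we take a folio reference, account the page in the file RSS
 * counter and add a file rmap entry before writing the PTE.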
*/ 2359 pteval = mk_pte(page, prot); 2360 if (unlikely(is_zero_folio(folio))) { 2361 pteval = pte_mkspecial(pteval); 2362 } else { 2363 folio_get(folio); 2364 pteval = mk_pte(page, prot); 2365 if (mkwrite) { 2366 pteval = pte_mkyoung(pteval); 2367 pteval = maybe_mkwrite(pte_mkdirty(pteval), vma); 2368 } 2369 inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); 2370 folio_add_file_rmap_pte(folio, page, vma); 2371 } 2372 set_pte_at(vma->vm_mm, addr, pte, pteval); 2373 return 0; 2374 } 2375 2376 static int insert_page(struct vm_area_struct *vma, unsigned long addr, 2377 struct page *page, pgprot_t prot, bool mkwrite) 2378 { 2379 int retval; 2380 pte_t *pte; 2381 spinlock_t *ptl; 2382 2383 retval = validate_page_before_insert(vma, page); 2384 if (retval) 2385 goto out; 2386 retval = -ENOMEM; 2387 pte = get_locked_pte(vma->vm_mm, addr, &ptl); 2388 if (!pte) 2389 goto out; 2390 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot, 2391 mkwrite); 2392 pte_unmap_unlock(pte, ptl); 2393 out: 2394 return retval; 2395 } 2396 2397 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, 2398 unsigned long addr, struct page *page, pgprot_t prot) 2399 { 2400 int err; 2401 2402 err = validate_page_before_insert(vma, page); 2403 if (err) 2404 return err; 2405 return insert_page_into_pte_locked(vma, pte, addr, page, prot, false); 2406 } 2407 2408 /* insert_pages() amortizes the cost of spinlock operations 2409 * when inserting pages in a loop. 2410 */ 2411 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, 2412 struct page **pages, unsigned long *num, pgprot_t prot) 2413 { 2414 pmd_t *pmd = NULL; 2415 pte_t *start_pte, *pte; 2416 spinlock_t *pte_lock; 2417 struct mm_struct *const mm = vma->vm_mm; 2418 unsigned long curr_page_idx = 0; 2419 unsigned long remaining_pages_total = *num; 2420 unsigned long pages_to_write_in_pmd; 2421 int ret; 2422 more: 2423 ret = -EFAULT; 2424 pmd = walk_to_pmd(mm, addr); 2425 if (!pmd) 2426 goto out; 2427 2428 pages_to_write_in_pmd = min_t(unsigned long, 2429 remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); 2430 2431 /* Allocate the PTE if necessary; takes PMD lock once only. */ 2432 ret = -ENOMEM; 2433 if (pte_alloc(mm, pmd)) 2434 goto out; 2435 2436 while (pages_to_write_in_pmd) { 2437 int pte_idx = 0; 2438 const int batch_size = min_t(int, pages_to_write_in_pmd, 8); 2439 2440 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); 2441 if (!start_pte) { 2442 ret = -EFAULT; 2443 goto out; 2444 } 2445 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { 2446 int err = insert_page_in_batch_locked(vma, pte, 2447 addr, pages[curr_page_idx], prot); 2448 if (unlikely(err)) { 2449 pte_unmap_unlock(start_pte, pte_lock); 2450 ret = err; 2451 remaining_pages_total -= pte_idx; 2452 goto out; 2453 } 2454 addr += PAGE_SIZE; 2455 ++curr_page_idx; 2456 } 2457 pte_unmap_unlock(start_pte, pte_lock); 2458 pages_to_write_in_pmd -= batch_size; 2459 remaining_pages_total -= batch_size; 2460 } 2461 if (remaining_pages_total) 2462 goto more; 2463 ret = 0; 2464 out: 2465 *num = remaining_pages_total; 2466 return ret; 2467 } 2468 2469 /** 2470 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. 2471 * @vma: user vma to map to 2472 * @addr: target start user address of these pages 2473 * @pages: source kernel pages 2474 * @num: in: number of pages to map. out: number of pages that were *not* 2475 * mapped. (0 means all pages were successfully mapped). 
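 *
 * A minimal illustrative caller (the "pages" array and "nr_pages" count
 * are hypothetical, not defined here):
 *
 *	unsigned long nr = nr_pages;
 *	int err = vm_insert_pages(vma, vma->vm_start, pages, &nr);
 *
 *	if (err)
 *		pr_warn("%lu page(s) could not be mapped\n", nr);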
2476 * 2477 * Preferred over vm_insert_page() when inserting multiple pages. 2478 * 2479 * In case of error, we may have mapped a subset of the provided 2480 * pages. It is the caller's responsibility to account for this case. 2481 * 2482 * The same restrictions apply as in vm_insert_page(). 2483 */ 2484 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 2485 struct page **pages, unsigned long *num) 2486 { 2487 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; 2488 2489 if (addr < vma->vm_start || end_addr >= vma->vm_end) 2490 return -EFAULT; 2491 if (!(vma->vm_flags & VM_MIXEDMAP)) { 2492 BUG_ON(mmap_read_trylock(vma->vm_mm)); 2493 BUG_ON(vma->vm_flags & VM_PFNMAP); 2494 vm_flags_set(vma, VM_MIXEDMAP); 2495 } 2496 /* Defer page refcount checking till we're about to map that page. */ 2497 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); 2498 } 2499 EXPORT_SYMBOL(vm_insert_pages); 2500 2501 /** 2502 * vm_insert_page - insert single page into user vma 2503 * @vma: user vma to map to 2504 * @addr: target user address of this page 2505 * @page: source kernel page 2506 * 2507 * This allows drivers to insert individual pages they've allocated 2508 * into a user vma. The zeropage is supported in some VMAs, 2509 * see vm_mixed_zeropage_allowed(). 2510 * 2511 * The page has to be a nice clean _individual_ kernel allocation. 2512 * If you allocate a compound page, you need to have marked it as 2513 * such (__GFP_COMP), or manually just split the page up yourself 2514 * (see split_page()). 2515 * 2516 * NOTE! Traditionally this was done with "remap_pfn_range()" which 2517 * took an arbitrary page protection parameter. This doesn't allow 2518 * that. Your vma protection will have to be set up correctly, which 2519 * means that if you want a shared writable mapping, you'd better 2520 * ask for a shared writable mapping! 2521 * 2522 * The page does not need to be reserved. 2523 * 2524 * Usually this function is called from f_op->mmap() handler 2525 * under mm->mmap_lock write-lock, so it can change vma->vm_flags. 2526 * Caller must set VM_MIXEDMAP on vma if it wants to call this 2527 * function from other places, for example from page-fault handler. 2528 * 2529 * Return: %0 on success, negative error code otherwise. 2530 */ 2531 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 2532 struct page *page) 2533 { 2534 if (addr < vma->vm_start || addr >= vma->vm_end) 2535 return -EFAULT; 2536 if (!(vma->vm_flags & VM_MIXEDMAP)) { 2537 BUG_ON(mmap_read_trylock(vma->vm_mm)); 2538 BUG_ON(vma->vm_flags & VM_PFNMAP); 2539 vm_flags_set(vma, VM_MIXEDMAP); 2540 } 2541 return insert_page(vma, addr, page, vma->vm_page_prot, false); 2542 } 2543 EXPORT_SYMBOL(vm_insert_page); 2544 2545 /* 2546 * __vm_map_pages - maps range of kernel pages into user vma 2547 * @vma: user vma to map to 2548 * @pages: pointer to array of source kernel pages 2549 * @num: number of pages in page array 2550 * @offset: user's requested vm_pgoff 2551 * 2552 * This allows drivers to map range of kernel pages into a user vma. 2553 * The zeropage is supported in some VMAs, see 2554 * vm_mixed_zeropage_allowed(). 2555 * 2556 * Return: 0 on success and error code otherwise. 
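 *
 * For example (illustrative): with @num == 8 and @offset == 2, at most six
 * pages can be mapped, starting at pages[2]; a VMA spanning more than six
 * pages makes this return -ENXIO.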
2557 */ 2558 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2559 unsigned long num, unsigned long offset) 2560 { 2561 unsigned long count = vma_pages(vma); 2562 unsigned long uaddr = vma->vm_start; 2563 2564 /* Fail if the user requested offset is beyond the end of the object */ 2565 if (offset >= num) 2566 return -ENXIO; 2567 2568 /* Fail if the user requested size exceeds available object size */ 2569 if (count > num - offset) 2570 return -ENXIO; 2571 2572 return vm_insert_pages(vma, uaddr, pages + offset, &count); 2573 } 2574 2575 /** 2576 * vm_map_pages - maps range of kernel pages starts with non zero offset 2577 * @vma: user vma to map to 2578 * @pages: pointer to array of source kernel pages 2579 * @num: number of pages in page array 2580 * 2581 * Maps an object consisting of @num pages, catering for the user's 2582 * requested vm_pgoff 2583 * 2584 * If we fail to insert any page into the vma, the function will return 2585 * immediately leaving any previously inserted pages present. Callers 2586 * from the mmap handler may immediately return the error as their caller 2587 * will destroy the vma, removing any successfully inserted pages. Other 2588 * callers should make their own arrangements for calling unmap_region(). 2589 * 2590 * Context: Process context. Called by mmap handlers. 2591 * Return: 0 on success and error code otherwise. 2592 */ 2593 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2594 unsigned long num) 2595 { 2596 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); 2597 } 2598 EXPORT_SYMBOL(vm_map_pages); 2599 2600 /** 2601 * vm_map_pages_zero - map range of kernel pages starts with zero offset 2602 * @vma: user vma to map to 2603 * @pages: pointer to array of source kernel pages 2604 * @num: number of pages in page array 2605 * 2606 * Similar to vm_map_pages(), except that it explicitly sets the offset 2607 * to 0. This function is intended for the drivers that did not consider 2608 * vm_pgoff. 2609 * 2610 * Context: Process context. Called by mmap handlers. 2611 * Return: 0 on success and error code otherwise. 2612 */ 2613 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, 2614 unsigned long num) 2615 { 2616 return __vm_map_pages(vma, pages, num, 0); 2617 } 2618 EXPORT_SYMBOL(vm_map_pages_zero); 2619 2620 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2621 unsigned long pfn, pgprot_t prot, bool mkwrite) 2622 { 2623 struct mm_struct *mm = vma->vm_mm; 2624 pte_t *pte, entry; 2625 spinlock_t *ptl; 2626 2627 pte = get_locked_pte(mm, addr, &ptl); 2628 if (!pte) 2629 return VM_FAULT_OOM; 2630 entry = ptep_get(pte); 2631 if (!pte_none(entry)) { 2632 if (mkwrite) { 2633 /* 2634 * For read faults on private mappings the PFN passed 2635 * in may not match the PFN we have mapped if the 2636 * mapped PFN is a writeable COW page. In the mkwrite 2637 * case we are creating a writable PTE for a shared 2638 * mapping and we expect the PFNs to match. If they 2639 * don't match, we are likely racing with block 2640 * allocation and mapping invalidation so just skip the 2641 * update. 2642 */ 2643 if (pte_pfn(entry) != pfn) { 2644 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry))); 2645 goto out_unlock; 2646 } 2647 entry = pte_mkyoung(entry); 2648 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2649 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) 2650 update_mmu_cache(vma, addr, pte); 2651 } 2652 goto out_unlock; 2653 } 2654 2655 /* Ok, finally just insert the thing.. 
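 * The PTE is always created as a special mapping: there may be no struct
 * page behind this PFN, so the core VM must never try to refcount it
 * (see vm_normal_page()).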
*/ 2656 entry = pte_mkspecial(pfn_pte(pfn, prot)); 2657 2658 if (mkwrite) { 2659 entry = pte_mkyoung(entry); 2660 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2661 } 2662 2663 set_pte_at(mm, addr, pte, entry); 2664 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ 2665 2666 out_unlock: 2667 pte_unmap_unlock(pte, ptl); 2668 return VM_FAULT_NOPAGE; 2669 } 2670 2671 /** 2672 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot 2673 * @vma: user vma to map to 2674 * @addr: target user address of this page 2675 * @pfn: source kernel pfn 2676 * @pgprot: pgprot flags for the inserted page 2677 * 2678 * This is exactly like vmf_insert_pfn(), except that it allows drivers 2679 * to override pgprot on a per-page basis. 2680 * 2681 * This only makes sense for IO mappings, and it makes no sense for 2682 * COW mappings. In general, using multiple vmas is preferable; 2683 * vmf_insert_pfn_prot should only be used if using multiple VMAs is 2684 * impractical. 2685 * 2686 * pgprot typically only differs from @vma->vm_page_prot when drivers set 2687 * caching- and encryption bits different than those of @vma->vm_page_prot, 2688 * because the caching- or encryption mode may not be known at mmap() time. 2689 * 2690 * This is ok as long as @vma->vm_page_prot is not used by the core vm 2691 * to set caching and encryption bits for those vmas (except for COW pages). 2692 * This is ensured by core vm only modifying these page table entries using 2693 * functions that don't touch caching- or encryption bits, using pte_modify() 2694 * if needed. (See for example mprotect()). 2695 * 2696 * Also when new page-table entries are created, this is only done using the 2697 * fault() callback, and never using the value of vma->vm_page_prot, 2698 * except for page-table entries that point to anonymous pages as the result 2699 * of COW. 2700 * 2701 * Context: Process context. May allocate using %GFP_KERNEL. 2702 * Return: vm_fault_t value. 2703 */ 2704 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2705 unsigned long pfn, pgprot_t pgprot) 2706 { 2707 /* 2708 * Technically, architectures with pte_special can avoid all these 2709 * restrictions (same for remap_pfn_range). However we would like 2710 * consistency in testing and feature parity among all, so we should 2711 * try to keep these invariants in place for everybody. 2712 */ 2713 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 2714 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 2715 (VM_PFNMAP|VM_MIXEDMAP)); 2716 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 2717 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 2718 2719 if (addr < vma->vm_start || addr >= vma->vm_end) 2720 return VM_FAULT_SIGBUS; 2721 2722 if (!pfn_modify_allowed(pfn, pgprot)) 2723 return VM_FAULT_SIGBUS; 2724 2725 pfnmap_setup_cachemode_pfn(pfn, &pgprot); 2726 2727 return insert_pfn(vma, addr, pfn, pgprot, false); 2728 } 2729 EXPORT_SYMBOL(vmf_insert_pfn_prot); 2730 2731 /** 2732 * vmf_insert_pfn - insert single pfn into user vma 2733 * @vma: user vma to map to 2734 * @addr: target user address of this page 2735 * @pfn: source kernel pfn 2736 * 2737 * Similar to vm_insert_page, this allows drivers to insert individual pages 2738 * they've allocated into a user vma. Same comments apply. 2739 * 2740 * This function should only be called from a vm_ops->fault handler, and 2741 * in that case the handler should return the result of this function. 
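 *
 * An illustrative fault handler (mydrv_pfn_for() is a hypothetical helper
 * that translates a page offset into a PFN):
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		unsigned long pfn = mydrv_pfn_for(vmf->pgoff);
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 *	}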
2742 * 2743 * vma cannot be a COW mapping. 2744 * 2745 * As this is called only for pages that do not currently exist, we 2746 * do not need to flush old virtual caches or the TLB. 2747 * 2748 * Context: Process context. May allocate using %GFP_KERNEL. 2749 * Return: vm_fault_t value. 2750 */ 2751 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2752 unsigned long pfn) 2753 { 2754 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); 2755 } 2756 EXPORT_SYMBOL(vmf_insert_pfn); 2757 2758 static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn, 2759 bool mkwrite) 2760 { 2761 if (unlikely(is_zero_pfn(pfn)) && 2762 (mkwrite || !vm_mixed_zeropage_allowed(vma))) 2763 return false; 2764 /* these checks mirror the abort conditions in vm_normal_page */ 2765 if (vma->vm_flags & VM_MIXEDMAP) 2766 return true; 2767 if (is_zero_pfn(pfn)) 2768 return true; 2769 return false; 2770 } 2771 2772 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, 2773 unsigned long addr, unsigned long pfn, bool mkwrite) 2774 { 2775 pgprot_t pgprot = vma->vm_page_prot; 2776 int err; 2777 2778 if (!vm_mixed_ok(vma, pfn, mkwrite)) 2779 return VM_FAULT_SIGBUS; 2780 2781 if (addr < vma->vm_start || addr >= vma->vm_end) 2782 return VM_FAULT_SIGBUS; 2783 2784 pfnmap_setup_cachemode_pfn(pfn, &pgprot); 2785 2786 if (!pfn_modify_allowed(pfn, pgprot)) 2787 return VM_FAULT_SIGBUS; 2788 2789 /* 2790 * If we don't have pte special, then we have to use the pfn_valid() 2791 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 2792 * refcount the page if pfn_valid is true (hence insert_page rather 2793 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP 2794 * without pte special, it would there be refcounted as a normal page. 2795 */ 2796 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_valid(pfn)) { 2797 struct page *page; 2798 2799 /* 2800 * At this point we are committed to insert_page() 2801 * regardless of whether the caller specified flags that 2802 * result in pfn_t_has_page() == false. 2803 */ 2804 page = pfn_to_page(pfn); 2805 err = insert_page(vma, addr, page, pgprot, mkwrite); 2806 } else { 2807 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); 2808 } 2809 2810 if (err == -ENOMEM) 2811 return VM_FAULT_OOM; 2812 if (err < 0 && err != -EBUSY) 2813 return VM_FAULT_SIGBUS; 2814 2815 return VM_FAULT_NOPAGE; 2816 } 2817 2818 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page, 2819 bool write) 2820 { 2821 pgprot_t pgprot = vmf->vma->vm_page_prot; 2822 unsigned long addr = vmf->address; 2823 int err; 2824 2825 if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end) 2826 return VM_FAULT_SIGBUS; 2827 2828 err = insert_page(vmf->vma, addr, page, pgprot, write); 2829 if (err == -ENOMEM) 2830 return VM_FAULT_OOM; 2831 if (err < 0 && err != -EBUSY) 2832 return VM_FAULT_SIGBUS; 2833 2834 return VM_FAULT_NOPAGE; 2835 } 2836 EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite); 2837 2838 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2839 unsigned long pfn) 2840 { 2841 return __vm_insert_mixed(vma, addr, pfn, false); 2842 } 2843 EXPORT_SYMBOL(vmf_insert_mixed); 2844 2845 /* 2846 * If the insertion of PTE failed because someone else already added a 2847 * different entry in the mean time, we treat that as success as we assume 2848 * the same entry was actually inserted. 
2849 */ 2850 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, 2851 unsigned long addr, unsigned long pfn) 2852 { 2853 return __vm_insert_mixed(vma, addr, pfn, true); 2854 } 2855 2856 /* 2857 * maps a range of physical memory into the requested pages. the old 2858 * mappings are removed. any references to nonexistent pages results 2859 * in null mappings (currently treated as "copy-on-access") 2860 */ 2861 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, 2862 unsigned long addr, unsigned long end, 2863 unsigned long pfn, pgprot_t prot) 2864 { 2865 pte_t *pte, *mapped_pte; 2866 spinlock_t *ptl; 2867 int err = 0; 2868 2869 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 2870 if (!pte) 2871 return -ENOMEM; 2872 lazy_mmu_mode_enable(); 2873 do { 2874 BUG_ON(!pte_none(ptep_get(pte))); 2875 if (!pfn_modify_allowed(pfn, prot)) { 2876 err = -EACCES; 2877 break; 2878 } 2879 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); 2880 pfn++; 2881 } while (pte++, addr += PAGE_SIZE, addr != end); 2882 lazy_mmu_mode_disable(); 2883 pte_unmap_unlock(mapped_pte, ptl); 2884 return err; 2885 } 2886 2887 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, 2888 unsigned long addr, unsigned long end, 2889 unsigned long pfn, pgprot_t prot) 2890 { 2891 pmd_t *pmd; 2892 unsigned long next; 2893 int err; 2894 2895 pfn -= addr >> PAGE_SHIFT; 2896 pmd = pmd_alloc(mm, pud, addr); 2897 if (!pmd) 2898 return -ENOMEM; 2899 VM_BUG_ON(pmd_trans_huge(*pmd)); 2900 do { 2901 next = pmd_addr_end(addr, end); 2902 err = remap_pte_range(mm, pmd, addr, next, 2903 pfn + (addr >> PAGE_SHIFT), prot); 2904 if (err) 2905 return err; 2906 } while (pmd++, addr = next, addr != end); 2907 return 0; 2908 } 2909 2910 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, 2911 unsigned long addr, unsigned long end, 2912 unsigned long pfn, pgprot_t prot) 2913 { 2914 pud_t *pud; 2915 unsigned long next; 2916 int err; 2917 2918 pfn -= addr >> PAGE_SHIFT; 2919 pud = pud_alloc(mm, p4d, addr); 2920 if (!pud) 2921 return -ENOMEM; 2922 do { 2923 next = pud_addr_end(addr, end); 2924 err = remap_pmd_range(mm, pud, addr, next, 2925 pfn + (addr >> PAGE_SHIFT), prot); 2926 if (err) 2927 return err; 2928 } while (pud++, addr = next, addr != end); 2929 return 0; 2930 } 2931 2932 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2933 unsigned long addr, unsigned long end, 2934 unsigned long pfn, pgprot_t prot) 2935 { 2936 p4d_t *p4d; 2937 unsigned long next; 2938 int err; 2939 2940 pfn -= addr >> PAGE_SHIFT; 2941 p4d = p4d_alloc(mm, pgd, addr); 2942 if (!p4d) 2943 return -ENOMEM; 2944 do { 2945 next = p4d_addr_end(addr, end); 2946 err = remap_pud_range(mm, p4d, addr, next, 2947 pfn + (addr >> PAGE_SHIFT), prot); 2948 if (err) 2949 return err; 2950 } while (p4d++, addr = next, addr != end); 2951 return 0; 2952 } 2953 2954 static int get_remap_pgoff(bool is_cow, unsigned long addr, 2955 unsigned long end, unsigned long vm_start, unsigned long vm_end, 2956 unsigned long pfn, pgoff_t *vm_pgoff_p) 2957 { 2958 /* 2959 * There's a horrible special case to handle copy-on-write 2960 * behaviour that some programs depend on. We mark the "original" 2961 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2962 * See vm_normal_page() for details. 
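 * Because vm_pgoff can only encode the first mapped PFN, this trick only
 * works when the remap covers the whole VMA; partial remaps of COW
 * mappings are therefore rejected with -EINVAL below.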
2963 */ 2964 if (is_cow) { 2965 if (addr != vm_start || end != vm_end) 2966 return -EINVAL; 2967 *vm_pgoff_p = pfn; 2968 } 2969 2970 return 0; 2971 } 2972 2973 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr, 2974 unsigned long pfn, unsigned long size, pgprot_t prot) 2975 { 2976 pgd_t *pgd; 2977 unsigned long next; 2978 unsigned long end = addr + PAGE_ALIGN(size); 2979 struct mm_struct *mm = vma->vm_mm; 2980 int err; 2981 2982 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) 2983 return -EINVAL; 2984 2985 VM_WARN_ON_ONCE(!vma_test_all_mask(vma, VMA_REMAP_FLAGS)); 2986 2987 BUG_ON(addr >= end); 2988 pfn -= addr >> PAGE_SHIFT; 2989 pgd = pgd_offset(mm, addr); 2990 flush_cache_range(vma, addr, end); 2991 do { 2992 next = pgd_addr_end(addr, end); 2993 err = remap_p4d_range(mm, pgd, addr, next, 2994 pfn + (addr >> PAGE_SHIFT), prot); 2995 if (err) 2996 return err; 2997 } while (pgd++, addr = next, addr != end); 2998 2999 return 0; 3000 } 3001 3002 /* 3003 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller 3004 * must have pre-validated the caching bits of the pgprot_t. 3005 */ 3006 static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, 3007 unsigned long pfn, unsigned long size, pgprot_t prot) 3008 { 3009 int error = remap_pfn_range_internal(vma, addr, pfn, size, prot); 3010 3011 if (!error) 3012 return 0; 3013 3014 /* 3015 * A partial pfn range mapping is dangerous: it does not 3016 * maintain page reference counts, and callers may free 3017 * pages due to the error. So zap it early. 3018 */ 3019 zap_vma_range(vma, addr, size); 3020 return error; 3021 } 3022 3023 #ifdef __HAVE_PFNMAP_TRACKING 3024 static inline struct pfnmap_track_ctx *pfnmap_track_ctx_alloc(unsigned long pfn, 3025 unsigned long size, pgprot_t *prot) 3026 { 3027 struct pfnmap_track_ctx *ctx; 3028 3029 if (pfnmap_track(pfn, size, prot)) 3030 return ERR_PTR(-EINVAL); 3031 3032 ctx = kmalloc_obj(*ctx); 3033 if (unlikely(!ctx)) { 3034 pfnmap_untrack(pfn, size); 3035 return ERR_PTR(-ENOMEM); 3036 } 3037 3038 ctx->pfn = pfn; 3039 ctx->size = size; 3040 kref_init(&ctx->kref); 3041 return ctx; 3042 } 3043 3044 void pfnmap_track_ctx_release(struct kref *ref) 3045 { 3046 struct pfnmap_track_ctx *ctx = container_of(ref, struct pfnmap_track_ctx, kref); 3047 3048 pfnmap_untrack(ctx->pfn, ctx->size); 3049 kfree(ctx); 3050 } 3051 3052 static int remap_pfn_range_track(struct vm_area_struct *vma, unsigned long addr, 3053 unsigned long pfn, unsigned long size, pgprot_t prot) 3054 { 3055 struct pfnmap_track_ctx *ctx = NULL; 3056 int err; 3057 3058 size = PAGE_ALIGN(size); 3059 3060 /* 3061 * If we cover the full VMA, we'll perform actual tracking, and 3062 * remember to untrack when the last reference to our tracking 3063 * context from a VMA goes away. We'll keep tracking the whole pfn 3064 * range even during VMA splits and partial unmapping. 3065 * 3066 * If we only cover parts of the VMA, we'll only setup the cachemode 3067 * in the pgprot for the pfn range. 
3068 */ 3069 if (addr == vma->vm_start && addr + size == vma->vm_end) { 3070 if (vma->pfnmap_track_ctx) 3071 return -EINVAL; 3072 ctx = pfnmap_track_ctx_alloc(pfn, size, &prot); 3073 if (IS_ERR(ctx)) 3074 return PTR_ERR(ctx); 3075 } else if (pfnmap_setup_cachemode(pfn, size, &prot)) { 3076 return -EINVAL; 3077 } 3078 3079 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); 3080 if (ctx) { 3081 if (err) 3082 kref_put(&ctx->kref, pfnmap_track_ctx_release); 3083 else 3084 vma->pfnmap_track_ctx = ctx; 3085 } 3086 return err; 3087 } 3088 3089 static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 3090 unsigned long pfn, unsigned long size, pgprot_t prot) 3091 { 3092 return remap_pfn_range_track(vma, addr, pfn, size, prot); 3093 } 3094 #else 3095 static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 3096 unsigned long pfn, unsigned long size, pgprot_t prot) 3097 { 3098 return remap_pfn_range_notrack(vma, addr, pfn, size, prot); 3099 } 3100 #endif 3101 3102 int remap_pfn_range_prepare(struct vm_area_desc *desc) 3103 { 3104 const struct mmap_action *action = &desc->action; 3105 const unsigned long start = action->remap.start; 3106 const unsigned long end = start + action->remap.size; 3107 const unsigned long pfn = action->remap.start_pfn; 3108 const bool is_cow = vma_desc_is_cow_mapping(desc); 3109 int err; 3110 3111 err = get_remap_pgoff(is_cow, start, end, desc->start, desc->end, pfn, 3112 &desc->pgoff); 3113 if (err) 3114 return err; 3115 3116 vma_desc_set_flags_mask(desc, VMA_REMAP_FLAGS); 3117 return 0; 3118 } 3119 3120 static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, 3121 unsigned long addr, unsigned long pfn, 3122 unsigned long size) 3123 { 3124 const unsigned long end = addr + PAGE_ALIGN(size); 3125 const bool is_cow = is_cow_mapping(vma->vm_flags); 3126 int err; 3127 3128 err = get_remap_pgoff(is_cow, addr, end, vma->vm_start, vma->vm_end, 3129 pfn, &vma->vm_pgoff); 3130 if (err) 3131 return err; 3132 3133 vma_set_flags_mask(vma, VMA_REMAP_FLAGS); 3134 return 0; 3135 } 3136 3137 /** 3138 * remap_pfn_range - remap kernel memory to userspace 3139 * @vma: user vma to map to 3140 * @addr: target page aligned user address to start at 3141 * @pfn: page frame number of kernel physical memory address 3142 * @size: size of mapping area 3143 * @prot: page protection flags for this mapping 3144 * 3145 * Note: this is only safe if the mm semaphore is held when called. 3146 * 3147 * Return: %0 on success, negative error code otherwise. 
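 *
 * A minimal ->mmap() handler (illustrative; "mydrv_phys" is a hypothetical
 * physical base address owned by the driver):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = (mydrv_phys >> PAGE_SHIFT) + vma->vm_pgoff;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}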
3148 */ 3149 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 3150 unsigned long pfn, unsigned long size, pgprot_t prot) 3151 { 3152 int err; 3153 3154 err = remap_pfn_range_prepare_vma(vma, addr, pfn, size); 3155 if (err) 3156 return err; 3157 3158 return do_remap_pfn_range(vma, addr, pfn, size, prot); 3159 } 3160 EXPORT_SYMBOL(remap_pfn_range); 3161 3162 int remap_pfn_range_complete(struct vm_area_struct *vma, 3163 struct mmap_action *action) 3164 { 3165 const unsigned long start = action->remap.start; 3166 const unsigned long pfn = action->remap.start_pfn; 3167 const unsigned long size = action->remap.size; 3168 const pgprot_t prot = action->remap.pgprot; 3169 3170 return do_remap_pfn_range(vma, start, pfn, size, prot); 3171 } 3172 3173 static int __simple_ioremap_prep(unsigned long vm_len, pgoff_t vm_pgoff, 3174 phys_addr_t start_phys, unsigned long size, 3175 unsigned long *pfnp) 3176 { 3177 unsigned long pfn, pages; 3178 3179 /* Check that the physical memory area passed in looks valid */ 3180 if (start_phys + size < start_phys) 3181 return -EINVAL; 3182 /* 3183 * You *really* shouldn't map things that aren't page-aligned, 3184 * but we've historically allowed it because IO memory might 3185 * just have smaller alignment. 3186 */ 3187 size += start_phys & ~PAGE_MASK; 3188 pfn = start_phys >> PAGE_SHIFT; 3189 pages = (size + ~PAGE_MASK) >> PAGE_SHIFT; 3190 if (pfn + pages < pfn) 3191 return -EINVAL; 3192 3193 /* We start the mapping 'vm_pgoff' pages into the area */ 3194 if (vm_pgoff > pages) 3195 return -EINVAL; 3196 pfn += vm_pgoff; 3197 pages -= vm_pgoff; 3198 3199 /* Can we fit all of the mapping? */ 3200 if ((vm_len >> PAGE_SHIFT) > pages) 3201 return -EINVAL; 3202 3203 *pfnp = pfn; 3204 return 0; 3205 } 3206 3207 int simple_ioremap_prepare(struct vm_area_desc *desc) 3208 { 3209 struct mmap_action *action = &desc->action; 3210 const phys_addr_t start = action->simple_ioremap.start_phys_addr; 3211 const unsigned long size = action->simple_ioremap.size; 3212 unsigned long pfn; 3213 int err; 3214 3215 err = __simple_ioremap_prep(vma_desc_size(desc), desc->pgoff, 3216 start, size, &pfn); 3217 if (err) 3218 return err; 3219 3220 /* The I/O remap logic does the heavy lifting. */ 3221 mmap_action_ioremap_full(desc, pfn); 3222 return io_remap_pfn_range_prepare(desc); 3223 } 3224 3225 /** 3226 * vm_iomap_memory - remap memory to userspace 3227 * @vma: user vma to map to 3228 * @start: start of the physical memory to be mapped 3229 * @len: size of area 3230 * 3231 * This is a simplified io_remap_pfn_range() for common driver use. The 3232 * driver just needs to give us the physical memory range to be mapped, 3233 * we'll figure out the rest from the vma information. 3234 * 3235 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get 3236 * whatever write-combining details or similar. 3237 * 3238 * Return: %0 on success, negative error code otherwise. 
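 *
 * An illustrative caller (the "mydrv_res" struct resource is hypothetical):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return vm_iomap_memory(vma, mydrv_res->start,
 *				       resource_size(mydrv_res));
 *	}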
3239 */ 3240 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 3241 { 3242 const unsigned long vm_start = vma->vm_start; 3243 const unsigned long vm_end = vma->vm_end; 3244 const unsigned long vm_len = vm_end - vm_start; 3245 unsigned long pfn; 3246 int err; 3247 3248 err = __simple_ioremap_prep(vm_len, vma->vm_pgoff, start, len, &pfn); 3249 if (err) 3250 return err; 3251 3252 /* Ok, let it rip */ 3253 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 3254 } 3255 EXPORT_SYMBOL(vm_iomap_memory); 3256 3257 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 3258 unsigned long addr, unsigned long end, 3259 pte_fn_t fn, void *data, bool create, 3260 pgtbl_mod_mask *mask) 3261 { 3262 pte_t *pte, *mapped_pte; 3263 int err = 0; 3264 spinlock_t *ptl; 3265 3266 if (create) { 3267 mapped_pte = pte = (mm == &init_mm) ? 3268 pte_alloc_kernel_track(pmd, addr, mask) : 3269 pte_alloc_map_lock(mm, pmd, addr, &ptl); 3270 if (!pte) 3271 return -ENOMEM; 3272 } else { 3273 mapped_pte = pte = (mm == &init_mm) ? 3274 pte_offset_kernel(pmd, addr) : 3275 pte_offset_map_lock(mm, pmd, addr, &ptl); 3276 if (!pte) 3277 return -EINVAL; 3278 } 3279 3280 lazy_mmu_mode_enable(); 3281 3282 if (fn) { 3283 do { 3284 if (create || !pte_none(ptep_get(pte))) { 3285 err = fn(pte, addr, data); 3286 if (err) 3287 break; 3288 } 3289 } while (pte++, addr += PAGE_SIZE, addr != end); 3290 } 3291 *mask |= PGTBL_PTE_MODIFIED; 3292 3293 lazy_mmu_mode_disable(); 3294 3295 if (mm != &init_mm) 3296 pte_unmap_unlock(mapped_pte, ptl); 3297 return err; 3298 } 3299 3300 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 3301 unsigned long addr, unsigned long end, 3302 pte_fn_t fn, void *data, bool create, 3303 pgtbl_mod_mask *mask) 3304 { 3305 pmd_t *pmd; 3306 unsigned long next; 3307 int err = 0; 3308 3309 BUG_ON(pud_leaf(*pud)); 3310 3311 if (create) { 3312 pmd = pmd_alloc_track(mm, pud, addr, mask); 3313 if (!pmd) 3314 return -ENOMEM; 3315 } else { 3316 pmd = pmd_offset(pud, addr); 3317 } 3318 do { 3319 next = pmd_addr_end(addr, end); 3320 if (pmd_none(*pmd) && !create) 3321 continue; 3322 if (WARN_ON_ONCE(pmd_leaf(*pmd))) 3323 return -EINVAL; 3324 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { 3325 if (!create) 3326 continue; 3327 pmd_clear_bad(pmd); 3328 } 3329 err = apply_to_pte_range(mm, pmd, addr, next, 3330 fn, data, create, mask); 3331 if (err) 3332 break; 3333 } while (pmd++, addr = next, addr != end); 3334 3335 return err; 3336 } 3337 3338 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, 3339 unsigned long addr, unsigned long end, 3340 pte_fn_t fn, void *data, bool create, 3341 pgtbl_mod_mask *mask) 3342 { 3343 pud_t *pud; 3344 unsigned long next; 3345 int err = 0; 3346 3347 if (create) { 3348 pud = pud_alloc_track(mm, p4d, addr, mask); 3349 if (!pud) 3350 return -ENOMEM; 3351 } else { 3352 pud = pud_offset(p4d, addr); 3353 } 3354 do { 3355 next = pud_addr_end(addr, end); 3356 if (pud_none(*pud) && !create) 3357 continue; 3358 if (WARN_ON_ONCE(pud_leaf(*pud))) 3359 return -EINVAL; 3360 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) { 3361 if (!create) 3362 continue; 3363 pud_clear_bad(pud); 3364 } 3365 err = apply_to_pmd_range(mm, pud, addr, next, 3366 fn, data, create, mask); 3367 if (err) 3368 break; 3369 } while (pud++, addr = next, addr != end); 3370 3371 return err; 3372 } 3373 3374 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, 3375 unsigned long addr, unsigned long end, 3376 pte_fn_t fn, void 
*data, bool create, 3377 pgtbl_mod_mask *mask) 3378 { 3379 p4d_t *p4d; 3380 unsigned long next; 3381 int err = 0; 3382 3383 if (create) { 3384 p4d = p4d_alloc_track(mm, pgd, addr, mask); 3385 if (!p4d) 3386 return -ENOMEM; 3387 } else { 3388 p4d = p4d_offset(pgd, addr); 3389 } 3390 do { 3391 next = p4d_addr_end(addr, end); 3392 if (p4d_none(*p4d) && !create) 3393 continue; 3394 if (WARN_ON_ONCE(p4d_leaf(*p4d))) 3395 return -EINVAL; 3396 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) { 3397 if (!create) 3398 continue; 3399 p4d_clear_bad(p4d); 3400 } 3401 err = apply_to_pud_range(mm, p4d, addr, next, 3402 fn, data, create, mask); 3403 if (err) 3404 break; 3405 } while (p4d++, addr = next, addr != end); 3406 3407 return err; 3408 } 3409 3410 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, 3411 unsigned long size, pte_fn_t fn, 3412 void *data, bool create) 3413 { 3414 pgd_t *pgd; 3415 unsigned long start = addr, next; 3416 unsigned long end = addr + size; 3417 pgtbl_mod_mask mask = 0; 3418 int err = 0; 3419 3420 if (WARN_ON(addr >= end)) 3421 return -EINVAL; 3422 3423 pgd = pgd_offset(mm, addr); 3424 do { 3425 next = pgd_addr_end(addr, end); 3426 if (pgd_none(*pgd) && !create) 3427 continue; 3428 if (WARN_ON_ONCE(pgd_leaf(*pgd))) { 3429 err = -EINVAL; 3430 break; 3431 } 3432 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { 3433 if (!create) 3434 continue; 3435 pgd_clear_bad(pgd); 3436 } 3437 err = apply_to_p4d_range(mm, pgd, addr, next, 3438 fn, data, create, &mask); 3439 if (err) 3440 break; 3441 } while (pgd++, addr = next, addr != end); 3442 3443 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 3444 arch_sync_kernel_mappings(start, start + size); 3445 3446 return err; 3447 } 3448 3449 /* 3450 * Scan a region of virtual memory, filling in page tables as necessary 3451 * and calling a provided function on each leaf page table. 3452 */ 3453 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 3454 unsigned long size, pte_fn_t fn, void *data) 3455 { 3456 return __apply_to_page_range(mm, addr, size, fn, data, true); 3457 } 3458 EXPORT_SYMBOL_GPL(apply_to_page_range); 3459 3460 /* 3461 * Scan a region of virtual memory, calling a provided function on 3462 * each leaf page table where it exists. 3463 * 3464 * Unlike apply_to_page_range, this does _not_ fill in page tables 3465 * where they are absent. 3466 */ 3467 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, 3468 unsigned long size, pte_fn_t fn, void *data) 3469 { 3470 return __apply_to_page_range(mm, addr, size, fn, data, false); 3471 } 3472 3473 /* 3474 * handle_pte_fault chooses page fault handler according to an entry which was 3475 * read non-atomically. Before making any commitment, on those architectures 3476 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched 3477 * parts, do_swap_page must check under lock before unmapping the pte and 3478 * proceeding (but do_wp_page is only called after already making such a check; 3479 * and do_anonymous_page can safely check later on). 
3480 */ 3481 static inline int pte_unmap_same(struct vm_fault *vmf) 3482 { 3483 int same = 1; 3484 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) 3485 if (sizeof(pte_t) > sizeof(unsigned long)) { 3486 spin_lock(vmf->ptl); 3487 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); 3488 spin_unlock(vmf->ptl); 3489 } 3490 #endif 3491 pte_unmap(vmf->pte); 3492 vmf->pte = NULL; 3493 return same; 3494 } 3495 3496 /* 3497 * Return: 3498 * 0: copied succeeded 3499 * -EHWPOISON: copy failed due to hwpoison in source page 3500 * -EAGAIN: copied failed (some other reason) 3501 */ 3502 static inline int __wp_page_copy_user(struct page *dst, struct page *src, 3503 struct vm_fault *vmf) 3504 { 3505 int ret; 3506 void *kaddr; 3507 void __user *uaddr; 3508 struct vm_area_struct *vma = vmf->vma; 3509 struct mm_struct *mm = vma->vm_mm; 3510 unsigned long addr = vmf->address; 3511 3512 if (likely(src)) { 3513 if (copy_mc_user_highpage(dst, src, addr, vma)) 3514 return -EHWPOISON; 3515 return 0; 3516 } 3517 3518 /* 3519 * If the source page was a PFN mapping, we don't have 3520 * a "struct page" for it. We do a best-effort copy by 3521 * just copying from the original user address. If that 3522 * fails, we just zero-fill it. Live with it. 3523 */ 3524 kaddr = kmap_local_page(dst); 3525 pagefault_disable(); 3526 uaddr = (void __user *)(addr & PAGE_MASK); 3527 3528 /* 3529 * On architectures with software "accessed" bits, we would 3530 * take a double page fault, so mark it accessed here. 3531 */ 3532 vmf->pte = NULL; 3533 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { 3534 pte_t entry; 3535 3536 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); 3537 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 3538 /* 3539 * Other thread has already handled the fault 3540 * and update local tlb only 3541 */ 3542 if (vmf->pte) 3543 update_mmu_tlb(vma, addr, vmf->pte); 3544 ret = -EAGAIN; 3545 goto pte_unlock; 3546 } 3547 3548 entry = pte_mkyoung(vmf->orig_pte); 3549 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) 3550 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); 3551 } 3552 3553 /* 3554 * This really shouldn't fail, because the page is there 3555 * in the page tables. But it might just be unreadable, 3556 * in which case we just give up and fill the result with 3557 * zeroes. 3558 */ 3559 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 3560 if (vmf->pte) 3561 goto warn; 3562 3563 /* Re-validate under PTL if the page is still mapped */ 3564 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); 3565 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 3566 /* The PTE changed under us, update local tlb */ 3567 if (vmf->pte) 3568 update_mmu_tlb(vma, addr, vmf->pte); 3569 ret = -EAGAIN; 3570 goto pte_unlock; 3571 } 3572 3573 /* 3574 * The same page can be mapped back since last copy attempt. 3575 * Try to copy again under PTL. 
3576 */ 3577 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 3578 /* 3579 * Give a warn in case there can be some obscure 3580 * use-case 3581 */ 3582 warn: 3583 WARN_ON_ONCE(1); 3584 clear_page(kaddr); 3585 } 3586 } 3587 3588 ret = 0; 3589 3590 pte_unlock: 3591 if (vmf->pte) 3592 pte_unmap_unlock(vmf->pte, vmf->ptl); 3593 pagefault_enable(); 3594 kunmap_local(kaddr); 3595 flush_dcache_page(dst); 3596 3597 return ret; 3598 } 3599 3600 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) 3601 { 3602 struct file *vm_file = vma->vm_file; 3603 3604 if (vm_file) 3605 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; 3606 3607 /* 3608 * Special mappings (e.g. VDSO) do not have any file so fake 3609 * a default GFP_KERNEL for them. 3610 */ 3611 return GFP_KERNEL; 3612 } 3613 3614 /* 3615 * Notify the address space that the page is about to become writable so that 3616 * it can prohibit this or wait for the page to get into an appropriate state. 3617 * 3618 * We do this without the lock held, so that it can sleep if it needs to. 3619 */ 3620 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) 3621 { 3622 vm_fault_t ret; 3623 unsigned int old_flags = vmf->flags; 3624 3625 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 3626 3627 if (vmf->vma->vm_file && 3628 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) 3629 return VM_FAULT_SIGBUS; 3630 3631 ret = vmf->vma->vm_ops->page_mkwrite(vmf); 3632 /* Restore original flags so that caller is not surprised */ 3633 vmf->flags = old_flags; 3634 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 3635 return ret; 3636 if (unlikely(!(ret & VM_FAULT_LOCKED))) { 3637 folio_lock(folio); 3638 if (!folio->mapping) { 3639 folio_unlock(folio); 3640 return 0; /* retry */ 3641 } 3642 ret |= VM_FAULT_LOCKED; 3643 } else 3644 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 3645 return ret; 3646 } 3647 3648 /* 3649 * Handle dirtying of a page in shared file mapping on a write fault. 3650 * 3651 * The function expects the page to be locked and unlocks it. 3652 */ 3653 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) 3654 { 3655 struct vm_area_struct *vma = vmf->vma; 3656 struct address_space *mapping; 3657 struct folio *folio = page_folio(vmf->page); 3658 bool dirtied; 3659 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; 3660 3661 dirtied = folio_mark_dirty(folio); 3662 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio); 3663 /* 3664 * Take a local copy of the address_space - folio.mapping may be zeroed 3665 * by truncate after folio_unlock(). The address_space itself remains 3666 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s 3667 * release semantics to prevent the compiler from undoing this copying. 3668 */ 3669 mapping = folio_raw_mapping(folio); 3670 folio_unlock(folio); 3671 3672 if (!page_mkwrite) 3673 file_update_time(vma->vm_file); 3674 3675 /* 3676 * Throttle page dirtying rate down to writeback speed. 3677 * 3678 * mapping may be NULL here because some device drivers do not 3679 * set page.mapping but still dirty their pages 3680 * 3681 * Drop the mmap_lock before waiting on IO, if we can. The file 3682 * is pinning the mapping, as per above. 
3683 */ 3684 if ((dirtied || page_mkwrite) && mapping) { 3685 struct file *fpin; 3686 3687 fpin = maybe_unlock_mmap_for_io(vmf, NULL); 3688 balance_dirty_pages_ratelimited(mapping); 3689 if (fpin) { 3690 fput(fpin); 3691 return VM_FAULT_COMPLETED; 3692 } 3693 } 3694 3695 return 0; 3696 } 3697 3698 /* 3699 * Handle write page faults for pages that can be reused in the current vma 3700 * 3701 * This can happen either due to the mapping being with the VM_SHARED flag, 3702 * or due to us being the last reference standing to the page. In either 3703 * case, all we need to do here is to mark the page as writable and update 3704 * any related book-keeping. 3705 */ 3706 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) 3707 __releases(vmf->ptl) 3708 { 3709 struct vm_area_struct *vma = vmf->vma; 3710 pte_t entry; 3711 3712 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); 3713 VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte))); 3714 3715 if (folio) { 3716 VM_BUG_ON(folio_test_anon(folio) && 3717 !PageAnonExclusive(vmf->page)); 3718 /* 3719 * Clear the folio's cpupid information as the existing 3720 * information potentially belongs to a now completely 3721 * unrelated process. 3722 */ 3723 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); 3724 } 3725 3726 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3727 entry = pte_mkyoung(vmf->orig_pte); 3728 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3729 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) 3730 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 3731 pte_unmap_unlock(vmf->pte, vmf->ptl); 3732 count_vm_event(PGREUSE); 3733 } 3734 3735 /* 3736 * We could add a bitflag somewhere, but for now, we know that all 3737 * vm_ops that have a ->map_pages have been audited and don't need 3738 * the mmap_lock to be held. 3739 */ 3740 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) 3741 { 3742 struct vm_area_struct *vma = vmf->vma; 3743 3744 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) 3745 return 0; 3746 vma_end_read(vma); 3747 return VM_FAULT_RETRY; 3748 } 3749 3750 /** 3751 * __vmf_anon_prepare - Prepare to handle an anonymous fault. 3752 * @vmf: The vm_fault descriptor passed from the fault handler. 3753 * 3754 * When preparing to insert an anonymous page into a VMA from a 3755 * fault handler, call this function rather than anon_vma_prepare(). 3756 * If this vma does not already have an associated anon_vma and we are 3757 * only protected by the per-VMA lock, the caller must retry with the 3758 * mmap_lock held. __anon_vma_prepare() will look at adjacent VMAs to 3759 * determine if this VMA can share its anon_vma, and that's not safe to 3760 * do with only the per-VMA lock held for this VMA. 3761 * 3762 * Return: 0 if fault handling can proceed. Any other value should be 3763 * returned to the caller. 3764 */ 3765 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf) 3766 { 3767 struct vm_area_struct *vma = vmf->vma; 3768 vm_fault_t ret = 0; 3769 3770 if (likely(vma->anon_vma)) 3771 return 0; 3772 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { 3773 if (!mmap_read_trylock(vma->vm_mm)) 3774 return VM_FAULT_RETRY; 3775 } 3776 if (__anon_vma_prepare(vma)) 3777 ret = VM_FAULT_OOM; 3778 if (vmf->flags & FAULT_FLAG_VMA_LOCK) 3779 mmap_read_unlock(vma->vm_mm); 3780 return ret; 3781 } 3782 3783 /* 3784 * Handle the case of a page which we actually need to copy to a new page, 3785 * either due to COW or unsharing. 
3786 * 3787 * Called with mmap_lock locked and the old page referenced, but 3788 * without the ptl held. 3789 * 3790 * High level logic flow: 3791 * 3792 * - Allocate a page, copy the content of the old page to the new one. 3793 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. 3794 * - Take the PTL. If the pte changed, bail out and release the allocated page 3795 * - If the pte is still the way we remember it, update the page table and all 3796 * relevant references. This includes dropping the reference the page-table 3797 * held to the old page, as well as updating the rmap. 3798 * - In any case, unlock the PTL and drop the reference we took to the old page. 3799 */ 3800 static vm_fault_t wp_page_copy(struct vm_fault *vmf) 3801 { 3802 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3803 struct vm_area_struct *vma = vmf->vma; 3804 struct mm_struct *mm = vma->vm_mm; 3805 struct folio *old_folio = NULL; 3806 struct folio *new_folio = NULL; 3807 pte_t entry; 3808 int page_copied = 0; 3809 struct mmu_notifier_range range; 3810 vm_fault_t ret; 3811 bool pfn_is_zero; 3812 3813 delayacct_wpcopy_start(); 3814 3815 if (vmf->page) 3816 old_folio = page_folio(vmf->page); 3817 ret = vmf_anon_prepare(vmf); 3818 if (unlikely(ret)) 3819 goto out; 3820 3821 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); 3822 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); 3823 if (!new_folio) 3824 goto oom; 3825 3826 if (!pfn_is_zero) { 3827 int err; 3828 3829 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); 3830 if (err) { 3831 /* 3832 * COW failed, if the fault was solved by other, 3833 * it's fine. If not, userspace would re-fault on 3834 * the same address and we will handle the fault 3835 * from the second attempt. 3836 * The -EHWPOISON case will not be retried. 3837 */ 3838 folio_put(new_folio); 3839 if (old_folio) 3840 folio_put(old_folio); 3841 3842 delayacct_wpcopy_end(); 3843 return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0; 3844 } 3845 kmsan_copy_page_meta(&new_folio->page, vmf->page); 3846 } 3847 3848 __folio_mark_uptodate(new_folio); 3849 3850 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 3851 vmf->address & PAGE_MASK, 3852 (vmf->address & PAGE_MASK) + PAGE_SIZE); 3853 mmu_notifier_invalidate_range_start(&range); 3854 3855 /* 3856 * Re-check the pte - we dropped the lock 3857 */ 3858 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); 3859 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 3860 if (old_folio) { 3861 if (!folio_test_anon(old_folio)) { 3862 dec_mm_counter(mm, mm_counter_file(old_folio)); 3863 inc_mm_counter(mm, MM_ANONPAGES); 3864 } 3865 } else { 3866 ksm_might_unmap_zero_page(mm, vmf->orig_pte); 3867 inc_mm_counter(mm, MM_ANONPAGES); 3868 } 3869 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3870 entry = folio_mk_pte(new_folio, vma->vm_page_prot); 3871 entry = pte_sw_mkyoung(entry); 3872 if (unlikely(unshare)) { 3873 if (pte_soft_dirty(vmf->orig_pte)) 3874 entry = pte_mksoft_dirty(entry); 3875 if (pte_uffd_wp(vmf->orig_pte)) 3876 entry = pte_mkuffd_wp(entry); 3877 } else { 3878 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3879 } 3880 3881 /* 3882 * Clear the pte entry and flush it first, before updating the 3883 * pte with the new entry, to keep TLBs on different CPUs in 3884 * sync. This code used to set the new PTE then flush TLBs, but 3885 * that left a window where the new PTE could be loaded into 3886 * some TLBs while the old PTE remains in others. 
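* By clearing and flushing first, the entry is non-present everywhere before the new PTE becomes visible, so no CPU can see a mix of old and new translations for this address.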
3887 */ 3888 ptep_clear_flush(vma, vmf->address, vmf->pte); 3889 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); 3890 folio_add_lru_vma(new_folio, vma); 3891 BUG_ON(unshare && pte_write(entry)); 3892 set_pte_at(mm, vmf->address, vmf->pte, entry); 3893 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); 3894 if (old_folio) { 3895 /* 3896 * Only after switching the pte to the new page may 3897 * we remove the mapcount here. Otherwise another 3898 * process may come and find the rmap count decremented 3899 * before the pte is switched to the new page, and 3900 * "reuse" the old page writing into it while our pte 3901 * here still points into it and can be read by other 3902 * threads. 3903 * 3904 * The critical issue is to order this 3905 * folio_remove_rmap_pte() with the ptp_clear_flush 3906 * above. Those stores are ordered by (if nothing else,) 3907 * the barrier present in the atomic_add_negative 3908 * in folio_remove_rmap_pte(); 3909 * 3910 * Then the TLB flush in ptep_clear_flush ensures that 3911 * no process can access the old page before the 3912 * decremented mapcount is visible. And the old page 3913 * cannot be reused until after the decremented 3914 * mapcount is visible. So transitively, TLBs to 3915 * old page will be flushed before it can be reused. 3916 */ 3917 folio_remove_rmap_pte(old_folio, vmf->page, vma); 3918 } 3919 3920 /* Free the old page.. */ 3921 new_folio = old_folio; 3922 page_copied = 1; 3923 pte_unmap_unlock(vmf->pte, vmf->ptl); 3924 } else if (vmf->pte) { 3925 update_mmu_tlb(vma, vmf->address, vmf->pte); 3926 pte_unmap_unlock(vmf->pte, vmf->ptl); 3927 } 3928 3929 mmu_notifier_invalidate_range_end(&range); 3930 3931 if (new_folio) 3932 folio_put(new_folio); 3933 if (old_folio) { 3934 if (page_copied) 3935 free_swap_cache(old_folio); 3936 folio_put(old_folio); 3937 } 3938 3939 delayacct_wpcopy_end(); 3940 return 0; 3941 oom: 3942 ret = VM_FAULT_OOM; 3943 out: 3944 if (old_folio) 3945 folio_put(old_folio); 3946 3947 delayacct_wpcopy_end(); 3948 return ret; 3949 } 3950 3951 /** 3952 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE 3953 * writeable once the page is prepared 3954 * 3955 * @vmf: structure describing the fault 3956 * @folio: the folio of vmf->page 3957 * 3958 * This function handles all that is needed to finish a write page fault in a 3959 * shared mapping due to PTE being read-only once the mapped page is prepared. 3960 * It handles locking of PTE and modifying it. 3961 * 3962 * The function expects the page to be locked or other protection against 3963 * concurrent faults / writeback (such as DAX radix tree locks). 3964 * 3965 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before 3966 * we acquired PTE lock. 3967 */ 3968 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) 3969 { 3970 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); 3971 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, 3972 &vmf->ptl); 3973 if (!vmf->pte) 3974 return VM_FAULT_NOPAGE; 3975 /* 3976 * We might have raced with another page fault while we released the 3977 * pte_offset_map_lock. 
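* If the PTE no longer matches orig_pte, the racing fault already handled the write: sync the local TLB, drop the lock and return VM_FAULT_NOPAGE.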
3978 */ 3979 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { 3980 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 3981 pte_unmap_unlock(vmf->pte, vmf->ptl); 3982 return VM_FAULT_NOPAGE; 3983 } 3984 wp_page_reuse(vmf, folio); 3985 return 0; 3986 } 3987 3988 /* 3989 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 3990 * mapping 3991 */ 3992 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) 3993 { 3994 struct vm_area_struct *vma = vmf->vma; 3995 3996 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { 3997 vm_fault_t ret; 3998 3999 pte_unmap_unlock(vmf->pte, vmf->ptl); 4000 ret = vmf_can_call_fault(vmf); 4001 if (ret) 4002 return ret; 4003 4004 vmf->flags |= FAULT_FLAG_MKWRITE; 4005 ret = vma->vm_ops->pfn_mkwrite(vmf); 4006 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) 4007 return ret; 4008 return finish_mkwrite_fault(vmf, NULL); 4009 } 4010 wp_page_reuse(vmf, NULL); 4011 return 0; 4012 } 4013 4014 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) 4015 __releases(vmf->ptl) 4016 { 4017 struct vm_area_struct *vma = vmf->vma; 4018 vm_fault_t ret = 0; 4019 4020 folio_get(folio); 4021 4022 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 4023 vm_fault_t tmp; 4024 4025 pte_unmap_unlock(vmf->pte, vmf->ptl); 4026 tmp = vmf_can_call_fault(vmf); 4027 if (tmp) { 4028 folio_put(folio); 4029 return tmp; 4030 } 4031 4032 tmp = do_page_mkwrite(vmf, folio); 4033 if (unlikely(!tmp || (tmp & 4034 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 4035 folio_put(folio); 4036 return tmp; 4037 } 4038 tmp = finish_mkwrite_fault(vmf, folio); 4039 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 4040 folio_unlock(folio); 4041 folio_put(folio); 4042 return tmp; 4043 } 4044 } else { 4045 wp_page_reuse(vmf, folio); 4046 folio_lock(folio); 4047 } 4048 ret |= fault_dirty_shared_page(vmf); 4049 folio_put(folio); 4050 4051 return ret; 4052 } 4053 4054 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4055 static bool __wp_can_reuse_large_anon_folio(struct folio *folio, 4056 struct vm_area_struct *vma) 4057 { 4058 bool exclusive = false; 4059 4060 /* Let's just free up a large folio if only a single page is mapped. */ 4061 if (folio_large_mapcount(folio) <= 1) 4062 return false; 4063 4064 /* 4065 * The assumption for anonymous folios is that each page can only get 4066 * mapped once into each MM. The only exception are KSM folios, which 4067 * are always small. 4068 * 4069 * Each taken mapcount must be paired with exactly one taken reference, 4070 * whereby the refcount must be incremented before the mapcount when 4071 * mapping a page, and the refcount must be decremented after the 4072 * mapcount when unmapping a page. 4073 * 4074 * If all folio references are from mappings, and all mappings are in 4075 * the page tables of this MM, then this folio is exclusive to this MM. 4076 */ 4077 if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids)) 4078 return false; 4079 4080 VM_WARN_ON_ONCE(folio_test_ksm(folio)); 4081 4082 if (unlikely(folio_test_swapcache(folio))) { 4083 /* 4084 * Note: freeing up the swapcache will fail if some PTEs are 4085 * still swap entries. 4086 */ 4087 if (!folio_trylock(folio)) 4088 return false; 4089 folio_free_swap(folio); 4090 folio_unlock(folio); 4091 } 4092 4093 if (folio_large_mapcount(folio) != folio_ref_count(folio)) 4094 return false; 4095 4096 /* Stabilize the mapcount vs. refcount and recheck. 
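* Taking the large mapcount lock should prevent concurrent (un)mapping from changing the mapcount while we compare it against the refcount below.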
*/ 4097 folio_lock_large_mapcount(folio); 4098 VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_ref_count(folio), folio); 4099 4100 if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids)) 4101 goto unlock; 4102 if (folio_large_mapcount(folio) != folio_ref_count(folio)) 4103 goto unlock; 4104 4105 VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_nr_pages(folio), folio); 4106 VM_WARN_ON_ONCE_FOLIO(folio_entire_mapcount(folio), folio); 4107 VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id && 4108 folio_mm_id(folio, 1) != vma->vm_mm->mm_id); 4109 4110 /* 4111 * Do we need the folio lock? Likely not. If there would have been 4112 * references from page migration/swapout, we would have detected 4113 * an additional folio reference and never ended up here. 4114 */ 4115 exclusive = true; 4116 unlock: 4117 folio_unlock_large_mapcount(folio); 4118 return exclusive; 4119 } 4120 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 4121 static bool __wp_can_reuse_large_anon_folio(struct folio *folio, 4122 struct vm_area_struct *vma) 4123 { 4124 BUILD_BUG(); 4125 } 4126 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 4127 4128 static bool wp_can_reuse_anon_folio(struct folio *folio, 4129 struct vm_area_struct *vma) 4130 { 4131 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio)) 4132 return __wp_can_reuse_large_anon_folio(folio, vma); 4133 4134 /* 4135 * We have to verify under folio lock: these early checks are 4136 * just an optimization to avoid locking the folio and freeing 4137 * the swapcache if there is little hope that we can reuse. 4138 * 4139 * KSM doesn't necessarily raise the folio refcount. 4140 */ 4141 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) 4142 return false; 4143 if (!folio_test_lru(folio)) 4144 /* 4145 * We cannot easily detect+handle references from 4146 * remote LRU caches or references to LRU folios. 4147 */ 4148 lru_add_drain(); 4149 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) 4150 return false; 4151 if (!folio_trylock(folio)) 4152 return false; 4153 if (folio_test_swapcache(folio)) 4154 folio_free_swap(folio); 4155 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { 4156 folio_unlock(folio); 4157 return false; 4158 } 4159 /* 4160 * Ok, we've got the only folio reference from our mapping 4161 * and the folio is locked, it's dark out, and we're wearing 4162 * sunglasses. Hit it. 4163 */ 4164 folio_move_anon_rmap(folio, vma); 4165 folio_unlock(folio); 4166 return true; 4167 } 4168 4169 /* 4170 * This routine handles present pages, when 4171 * * users try to write to a shared page (FAULT_FLAG_WRITE) 4172 * * GUP wants to take a R/O pin on a possibly shared anonymous page 4173 * (FAULT_FLAG_UNSHARE) 4174 * 4175 * It is done by copying the page to a new address and decrementing the 4176 * shared-page counter for the old page. 4177 * 4178 * Note that this routine assumes that the protection checks have been 4179 * done by the caller (the low-level page fault routine in most cases). 4180 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've 4181 * done any necessary COW. 4182 * 4183 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even 4184 * though the page will change only once the write actually happens. This 4185 * avoids a few races, and potentially makes it more efficient. 4186 * 4187 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4188 * but allow concurrent faults), with pte both mapped and locked. 
4189 * We return with mmap_lock still held, but pte unmapped and unlocked. 4190 */ 4191 static vm_fault_t do_wp_page(struct vm_fault *vmf) 4192 __releases(vmf->ptl) 4193 { 4194 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 4195 struct vm_area_struct *vma = vmf->vma; 4196 struct folio *folio = NULL; 4197 pte_t pte; 4198 4199 if (likely(!unshare)) { 4200 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { 4201 if (!userfaultfd_wp_async(vma)) { 4202 pte_unmap_unlock(vmf->pte, vmf->ptl); 4203 return handle_userfault(vmf, VM_UFFD_WP); 4204 } 4205 4206 /* 4207 * Nothing needed (cache flush, TLB invalidations, 4208 * etc.) because we're only removing the uffd-wp bit, 4209 * which is completely invisible to the user. 4210 */ 4211 pte = pte_clear_uffd_wp(ptep_get(vmf->pte)); 4212 4213 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 4214 /* 4215 * Update this to be prepared for following up CoW 4216 * handling 4217 */ 4218 vmf->orig_pte = pte; 4219 } 4220 4221 /* 4222 * Userfaultfd write-protect can defer flushes. Ensure the TLB 4223 * is flushed in this case before copying. 4224 */ 4225 if (unlikely(userfaultfd_wp(vmf->vma) && 4226 mm_tlb_flush_pending(vmf->vma->vm_mm))) 4227 flush_tlb_page(vmf->vma, vmf->address); 4228 } 4229 4230 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); 4231 4232 if (vmf->page) 4233 folio = page_folio(vmf->page); 4234 4235 /* 4236 * Shared mapping: we are guaranteed to have VM_WRITE and 4237 * FAULT_FLAG_WRITE set at this point. 4238 */ 4239 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 4240 /* 4241 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a 4242 * VM_PFNMAP VMA. FS DAX also wants ops->pfn_mkwrite called. 4243 * 4244 * We should not cow pages in a shared writeable mapping. 4245 * Just mark the pages writable and/or call ops->pfn_mkwrite. 4246 */ 4247 if (!vmf->page || is_fsdax_page(vmf->page)) { 4248 vmf->page = NULL; 4249 return wp_pfn_shared(vmf); 4250 } 4251 return wp_page_shared(vmf, folio); 4252 } 4253 4254 /* 4255 * Private mapping: create an exclusive anonymous page copy if reuse 4256 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling. 4257 * 4258 * If we encounter a page that is marked exclusive, we must reuse 4259 * the page without further checks. 4260 */ 4261 if (folio && folio_test_anon(folio) && 4262 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { 4263 if (!PageAnonExclusive(vmf->page)) 4264 SetPageAnonExclusive(vmf->page); 4265 if (unlikely(unshare)) { 4266 pte_unmap_unlock(vmf->pte, vmf->ptl); 4267 return 0; 4268 } 4269 wp_page_reuse(vmf, folio); 4270 return 0; 4271 } 4272 /* 4273 * Ok, we need to copy. Oh, well.. 
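* Take a reference on the old folio first so it cannot be freed once the PT lock is dropped; wp_page_copy() drops that reference again when it is done.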
4274 */ 4275 if (folio) 4276 folio_get(folio); 4277 4278 pte_unmap_unlock(vmf->pte, vmf->ptl); 4279 #ifdef CONFIG_KSM 4280 if (folio && folio_test_ksm(folio)) 4281 count_vm_event(COW_KSM); 4282 #endif 4283 return wp_page_copy(vmf); 4284 } 4285 4286 static inline void unmap_mapping_range_tree(struct rb_root_cached *root, 4287 pgoff_t first_index, 4288 pgoff_t last_index, 4289 struct zap_details *details) 4290 { 4291 struct vm_area_struct *vma; 4292 unsigned long start, size; 4293 struct mmu_gather tlb; 4294 4295 vma_interval_tree_foreach(vma, root, first_index, last_index) { 4296 const pgoff_t start_idx = max(first_index, vma->vm_pgoff); 4297 const pgoff_t end_idx = min(last_index, vma_last_pgoff(vma)) + 1; 4298 4299 start = vma->vm_start + ((start_idx - vma->vm_pgoff) << PAGE_SHIFT); 4300 size = (end_idx - start_idx) << PAGE_SHIFT; 4301 4302 tlb_gather_mmu(&tlb, vma->vm_mm); 4303 zap_vma_range_batched(&tlb, vma, start, size, details); 4304 tlb_finish_mmu(&tlb); 4305 } 4306 } 4307 4308 /** 4309 * unmap_mapping_folio() - Unmap single folio from processes. 4310 * @folio: The locked folio to be unmapped. 4311 * 4312 * Unmap this folio from any userspace process which still has it mmaped. 4313 * Typically, for efficiency, the range of nearby pages has already been 4314 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once 4315 * truncation or invalidation holds the lock on a folio, it may find that 4316 * the page has been remapped again: and then uses unmap_mapping_folio() 4317 * to unmap it finally. 4318 */ 4319 void unmap_mapping_folio(struct folio *folio) 4320 { 4321 struct address_space *mapping = folio->mapping; 4322 struct zap_details details = { }; 4323 pgoff_t first_index; 4324 pgoff_t last_index; 4325 4326 VM_BUG_ON(!folio_test_locked(folio)); 4327 4328 first_index = folio->index; 4329 last_index = folio_next_index(folio) - 1; 4330 4331 details.skip_cows = true; 4332 details.single_folio = folio; 4333 details.zap_flags = ZAP_FLAG_DROP_MARKER; 4334 4335 i_mmap_lock_read(mapping); 4336 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 4337 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 4338 last_index, &details); 4339 i_mmap_unlock_read(mapping); 4340 } 4341 4342 /** 4343 * unmap_mapping_pages() - Unmap pages from processes. 4344 * @mapping: The address space containing pages to be unmapped. 4345 * @start: Index of first page to be unmapped. 4346 * @nr: Number of pages to be unmapped. 0 to unmap to end of file. 4347 * @even_cows: Whether to unmap even private COWed pages. 4348 * 4349 * Unmap the pages in this address space from any userspace process which 4350 * has them mmaped. Generally, you want to remove COWed pages as well when 4351 * a file is being truncated, but not when invalidating pages from the page 4352 * cache. 
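* A @nr of 0, or a range whose last index would wrap, is treated as a request to unmap through the end of the file.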
4353 */ 4354 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, 4355 pgoff_t nr, bool even_cows) 4356 { 4357 struct zap_details details = { }; 4358 pgoff_t first_index = start; 4359 pgoff_t last_index = start + nr - 1; 4360 4361 details.skip_cows = !even_cows; 4362 if (last_index < first_index) 4363 last_index = ULONG_MAX; 4364 4365 i_mmap_lock_read(mapping); 4366 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 4367 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 4368 last_index, &details); 4369 i_mmap_unlock_read(mapping); 4370 } 4371 EXPORT_SYMBOL_GPL(unmap_mapping_pages); 4372 4373 /** 4374 * unmap_mapping_range - unmap the portion of all mmaps in the specified 4375 * address_space corresponding to the specified byte range in the underlying 4376 * file. 4377 * 4378 * @mapping: the address space containing mmaps to be unmapped. 4379 * @holebegin: byte in first page to unmap, relative to the start of 4380 * the underlying file. This will be rounded down to a PAGE_SIZE 4381 * boundary. Note that this is different from truncate_pagecache(), which 4382 * must keep the partial page. In contrast, we must get rid of 4383 * partial pages. 4384 * @holelen: size of prospective hole in bytes. This will be rounded 4385 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 4386 * end of the file. 4387 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 4388 * but 0 when invalidating pagecache, don't throw away private data. 4389 */ 4390 void unmap_mapping_range(struct address_space *mapping, 4391 loff_t const holebegin, loff_t const holelen, int even_cows) 4392 { 4393 pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT; 4394 pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT; 4395 4396 /* Check for overflow. */ 4397 if (sizeof(holelen) > sizeof(hlen)) { 4398 long long holeend = 4399 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 4400 if (holeend & ~(long long)ULONG_MAX) 4401 hlen = ULONG_MAX - hba + 1; 4402 } 4403 4404 unmap_mapping_pages(mapping, hba, hlen, even_cows); 4405 } 4406 EXPORT_SYMBOL(unmap_mapping_range); 4407 4408 /* 4409 * Restore a potential device exclusive pte to a working pte entry 4410 */ 4411 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) 4412 { 4413 struct folio *folio = page_folio(vmf->page); 4414 struct vm_area_struct *vma = vmf->vma; 4415 struct mmu_notifier_range range; 4416 vm_fault_t ret; 4417 4418 /* 4419 * We need a reference to lock the folio because we don't hold 4420 * the PTL so a racing thread can remove the device-exclusive 4421 * entry and unmap it. If the folio is free the entry must 4422 * have been removed already. If it happens to have already 4423 * been re-allocated after being freed all we do is lock and 4424 * unlock it. 
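* The pte_same() check under the PT lock below catches the case where the entry was already replaced: we then just drop the reference and return success.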
4425 */ 4426 if (!folio_try_get(folio)) 4427 return 0; 4428 4429 ret = folio_lock_or_retry(folio, vmf); 4430 if (ret) { 4431 folio_put(folio); 4432 return ret; 4433 } 4434 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_CLEAR, 0, 4435 vma->vm_mm, vmf->address & PAGE_MASK, 4436 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); 4437 mmu_notifier_invalidate_range_start(&range); 4438 4439 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4440 &vmf->ptl); 4441 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 4442 restore_exclusive_pte(vma, folio, vmf->page, vmf->address, 4443 vmf->pte, vmf->orig_pte); 4444 4445 if (vmf->pte) 4446 pte_unmap_unlock(vmf->pte, vmf->ptl); 4447 folio_unlock(folio); 4448 folio_put(folio); 4449 4450 mmu_notifier_invalidate_range_end(&range); 4451 return 0; 4452 } 4453 4454 /* 4455 * Check if we should call folio_free_swap to free the swap cache. 4456 * folio_free_swap only frees the swap cache to release the slot if swap 4457 * count is zero, so we don't need to check the swap count here. 4458 */ 4459 static inline bool should_try_to_free_swap(struct swap_info_struct *si, 4460 struct folio *folio, 4461 struct vm_area_struct *vma, 4462 unsigned int extra_refs, 4463 unsigned int fault_flags) 4464 { 4465 if (!folio_test_swapcache(folio)) 4466 return false; 4467 /* 4468 * Always try to free swap cache for SWP_SYNCHRONOUS_IO devices. Swap 4469 * cache can help save some IO or memory overhead, but these devices 4470 * are fast, and meanwhile, swap cache pinning the slot deferring the 4471 * release of metadata or fragmentation is a more critical issue. 4472 */ 4473 if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) 4474 return true; 4475 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || 4476 folio_test_mlocked(folio)) 4477 return true; 4478 /* 4479 * If we want to map a page that's in the swapcache writable, we 4480 * have to detect via the refcount if we're really the exclusive 4481 * user. Try freeing the swapcache to get rid of the swapcache 4482 * reference only in case it's likely that we'll be the exclusive user. 4483 */ 4484 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && 4485 folio_ref_count(folio) == (extra_refs + folio_nr_pages(folio)); 4486 } 4487 4488 static vm_fault_t pte_marker_clear(struct vm_fault *vmf) 4489 { 4490 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 4491 vmf->address, &vmf->ptl); 4492 if (!vmf->pte) 4493 return 0; 4494 /* 4495 * Be careful so that we will only recover a special uffd-wp pte into a 4496 * none pte. Otherwise it means the pte could have changed, so retry. 4497 * 4498 * This should also cover the case where e.g. the pte changed 4499 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED. 4500 * So pte_is_marker() check is not enough to safely drop the pte. 4501 */ 4502 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) 4503 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); 4504 pte_unmap_unlock(vmf->pte, vmf->ptl); 4505 return 0; 4506 } 4507 4508 static vm_fault_t do_pte_missing(struct vm_fault *vmf) 4509 { 4510 if (vma_is_anonymous(vmf->vma)) 4511 return do_anonymous_page(vmf); 4512 else 4513 return do_fault(vmf); 4514 } 4515 4516 /* 4517 * This is actually a page-missing access, but with uffd-wp special pte 4518 * installed. It means this pte was wr-protected before being unmapped. 
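* If userfaultfd-wp is no longer registered on the VMA, the marker is stale and is simply cleared; otherwise the fault is handled as an ordinary missing-page fault.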
4519 */ 4520 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) 4521 { 4522 /* 4523 * Just in case there're leftover special ptes even after the region 4524 * got unregistered - we can simply clear them. 4525 */ 4526 if (unlikely(!userfaultfd_wp(vmf->vma))) 4527 return pte_marker_clear(vmf); 4528 4529 return do_pte_missing(vmf); 4530 } 4531 4532 static vm_fault_t handle_pte_marker(struct vm_fault *vmf) 4533 { 4534 const softleaf_t entry = softleaf_from_pte(vmf->orig_pte); 4535 const pte_marker marker = softleaf_to_marker(entry); 4536 4537 /* 4538 * PTE markers should never be empty. If anything weird happened, 4539 * the best thing to do is to kill the process along with its mm. 4540 */ 4541 if (WARN_ON_ONCE(!marker)) 4542 return VM_FAULT_SIGBUS; 4543 4544 /* Higher priority than uffd-wp when data corrupted */ 4545 if (marker & PTE_MARKER_POISONED) 4546 return VM_FAULT_HWPOISON; 4547 4548 /* Hitting a guard page is always a fatal condition. */ 4549 if (marker & PTE_MARKER_GUARD) 4550 return VM_FAULT_SIGSEGV; 4551 4552 if (softleaf_is_uffd_wp_marker(entry)) 4553 return pte_marker_handle_uffd_wp(vmf); 4554 4555 /* This is an unknown pte marker */ 4556 return VM_FAULT_SIGBUS; 4557 } 4558 4559 static struct folio *__alloc_swap_folio(struct vm_fault *vmf) 4560 { 4561 struct vm_area_struct *vma = vmf->vma; 4562 struct folio *folio; 4563 softleaf_t entry; 4564 4565 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address); 4566 if (!folio) 4567 return NULL; 4568 4569 entry = softleaf_from_pte(vmf->orig_pte); 4570 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, 4571 GFP_KERNEL, entry)) { 4572 folio_put(folio); 4573 return NULL; 4574 } 4575 4576 return folio; 4577 } 4578 4579 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4580 /* 4581 * Check if the PTEs within a range are contiguous swap entries 4582 * and have consistent swapcache, zeromap. 4583 */ 4584 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) 4585 { 4586 unsigned long addr; 4587 softleaf_t entry; 4588 int idx; 4589 pte_t pte; 4590 4591 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); 4592 idx = (vmf->address - addr) / PAGE_SIZE; 4593 pte = ptep_get(ptep); 4594 4595 if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx))) 4596 return false; 4597 entry = softleaf_from_pte(pte); 4598 if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages) 4599 return false; 4600 4601 /* 4602 * swap_read_folio() can't handle the case a large folio is hybridly 4603 * from different backends. And they are likely corner cases. Similar 4604 * things might be added once zswap support large folios. 4605 */ 4606 if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages)) 4607 return false; 4608 if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages)) 4609 return false; 4610 4611 return true; 4612 } 4613 4614 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset, 4615 unsigned long addr, 4616 unsigned long orders) 4617 { 4618 int order, nr; 4619 4620 order = highest_order(orders); 4621 4622 /* 4623 * To swap in a THP with nr pages, we require that its first swap_offset 4624 * is aligned with that number, as it was when the THP was swapped out. 4625 * This helps filter out most invalid entries. 
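* For example, with nr = 4 pages an entry at swap offset 8 can only back virtual addresses whose page index is a multiple of 4, since 8 % 4 == 0.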
4626 */ 4627 while (orders) { 4628 nr = 1 << order; 4629 if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr) 4630 break; 4631 order = next_order(&orders, order); 4632 } 4633 4634 return orders; 4635 } 4636 4637 static struct folio *alloc_swap_folio(struct vm_fault *vmf) 4638 { 4639 struct vm_area_struct *vma = vmf->vma; 4640 unsigned long orders; 4641 struct folio *folio; 4642 unsigned long addr; 4643 softleaf_t entry; 4644 spinlock_t *ptl; 4645 pte_t *pte; 4646 gfp_t gfp; 4647 int order; 4648 4649 /* 4650 * If uffd is active for the vma we need per-page fault fidelity to 4651 * maintain the uffd semantics. 4652 */ 4653 if (unlikely(userfaultfd_armed(vma))) 4654 goto fallback; 4655 4656 /* 4657 * A large swapped out folio could be partially or fully in zswap. We 4658 * lack handling for such cases, so fallback to swapping in order-0 4659 * folio. 4660 */ 4661 if (!zswap_never_enabled()) 4662 goto fallback; 4663 4664 entry = softleaf_from_pte(vmf->orig_pte); 4665 /* 4666 * Get a list of all the (large) orders below PMD_ORDER that are enabled 4667 * and suitable for swapping THP. 4668 */ 4669 orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT, 4670 BIT(PMD_ORDER) - 1); 4671 orders = thp_vma_suitable_orders(vma, vmf->address, orders); 4672 orders = thp_swap_suitable_orders(swp_offset(entry), 4673 vmf->address, orders); 4674 4675 if (!orders) 4676 goto fallback; 4677 4678 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 4679 vmf->address & PMD_MASK, &ptl); 4680 if (unlikely(!pte)) 4681 goto fallback; 4682 4683 /* 4684 * For do_swap_page, find the highest order where the aligned range is 4685 * completely swap entries with contiguous swap offsets. 4686 */ 4687 order = highest_order(orders); 4688 while (orders) { 4689 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 4690 if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order)) 4691 break; 4692 order = next_order(&orders, order); 4693 } 4694 4695 pte_unmap_unlock(pte, ptl); 4696 4697 /* Try allocating the highest of the remaining orders. */ 4698 gfp = vma_thp_gfp_mask(vma); 4699 while (orders) { 4700 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 4701 folio = vma_alloc_folio(gfp, order, vma, addr); 4702 if (folio) { 4703 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, 4704 gfp, entry)) 4705 return folio; 4706 count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE); 4707 folio_put(folio); 4708 } 4709 count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK); 4710 order = next_order(&orders, order); 4711 } 4712 4713 fallback: 4714 return __alloc_swap_folio(vmf); 4715 } 4716 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 4717 static struct folio *alloc_swap_folio(struct vm_fault *vmf) 4718 { 4719 return __alloc_swap_folio(vmf); 4720 } 4721 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 4722 4723 /* Sanity check that a folio is fully exclusive */ 4724 static void check_swap_exclusive(struct folio *folio, swp_entry_t entry, 4725 unsigned int nr_pages) 4726 { 4727 /* Called under PT locked and folio locked, the swap count is stable */ 4728 do { 4729 VM_WARN_ON_ONCE_FOLIO(__swap_count(entry) != 1, folio); 4730 entry.val++; 4731 } while (--nr_pages); 4732 } 4733 4734 /* 4735 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4736 * but allow concurrent faults), and pte mapped but not yet locked. 4737 * We return with pte unmapped and unlocked. 4738 * 4739 * We return with the mmap_lock locked or unlocked in the same cases 4740 * as does filemap_fault(). 
4741 */ 4742 vm_fault_t do_swap_page(struct vm_fault *vmf) 4743 { 4744 struct vm_area_struct *vma = vmf->vma; 4745 struct folio *swapcache = NULL, *folio; 4746 struct page *page; 4747 struct swap_info_struct *si = NULL; 4748 rmap_t rmap_flags = RMAP_NONE; 4749 bool exclusive = false; 4750 softleaf_t entry; 4751 pte_t pte; 4752 vm_fault_t ret = 0; 4753 int nr_pages; 4754 unsigned long page_idx; 4755 unsigned long address; 4756 pte_t *ptep; 4757 4758 if (!pte_unmap_same(vmf)) 4759 goto out; 4760 4761 entry = softleaf_from_pte(vmf->orig_pte); 4762 if (unlikely(!softleaf_is_swap(entry))) { 4763 if (softleaf_is_migration(entry)) { 4764 migration_entry_wait(vma->vm_mm, vmf->pmd, 4765 vmf->address); 4766 } else if (softleaf_is_device_exclusive(entry)) { 4767 vmf->page = softleaf_to_page(entry); 4768 ret = remove_device_exclusive_entry(vmf); 4769 } else if (softleaf_is_device_private(entry)) { 4770 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { 4771 /* 4772 * migrate_to_ram is not yet ready to operate 4773 * under VMA lock. 4774 */ 4775 vma_end_read(vma); 4776 ret = VM_FAULT_RETRY; 4777 goto out; 4778 } 4779 4780 vmf->page = softleaf_to_page(entry); 4781 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4782 vmf->address, &vmf->ptl); 4783 if (unlikely(!vmf->pte || 4784 !pte_same(ptep_get(vmf->pte), 4785 vmf->orig_pte))) 4786 goto unlock; 4787 4788 /* 4789 * Get a page reference while we know the page can't be 4790 * freed. 4791 */ 4792 if (trylock_page(vmf->page)) { 4793 struct dev_pagemap *pgmap; 4794 4795 get_page(vmf->page); 4796 pte_unmap_unlock(vmf->pte, vmf->ptl); 4797 pgmap = page_pgmap(vmf->page); 4798 ret = pgmap->ops->migrate_to_ram(vmf); 4799 unlock_page(vmf->page); 4800 put_page(vmf->page); 4801 } else { 4802 pte_unmap(vmf->pte); 4803 softleaf_entry_wait_on_locked(entry, vmf->ptl); 4804 } 4805 } else if (softleaf_is_hwpoison(entry)) { 4806 ret = VM_FAULT_HWPOISON; 4807 } else if (softleaf_is_marker(entry)) { 4808 ret = handle_pte_marker(vmf); 4809 } else { 4810 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); 4811 ret = VM_FAULT_SIGBUS; 4812 } 4813 goto out; 4814 } 4815 4816 /* Prevent swapoff from happening to us. */ 4817 si = get_swap_device(entry); 4818 if (unlikely(!si)) 4819 goto out; 4820 4821 folio = swap_cache_get_folio(entry); 4822 if (folio) 4823 swap_update_readahead(folio, vma, vmf->address); 4824 if (!folio) { 4825 if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) { 4826 folio = alloc_swap_folio(vmf); 4827 if (folio) { 4828 /* 4829 * folio is charged, so swapin can only fail due 4830 * to raced swapin and return NULL. 4831 */ 4832 swapcache = swapin_folio(entry, folio); 4833 if (swapcache != folio) 4834 folio_put(folio); 4835 folio = swapcache; 4836 } 4837 } else { 4838 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf); 4839 } 4840 4841 if (!folio) { 4842 /* 4843 * Back out if somebody else faulted in this pte 4844 * while we released the pte lock. 
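* Only report VM_FAULT_OOM if the PTE still matches orig_pte, i.e. the swapin allocation genuinely failed; if the PTE changed, another thread already resolved the fault.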
4845 */ 4846 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4847 vmf->address, &vmf->ptl); 4848 if (likely(vmf->pte && 4849 pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 4850 ret = VM_FAULT_OOM; 4851 goto unlock; 4852 } 4853 4854 /* Had to read the page from swap area: Major fault */ 4855 ret = VM_FAULT_MAJOR; 4856 count_vm_event(PGMAJFAULT); 4857 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 4858 } 4859 4860 swapcache = folio; 4861 ret |= folio_lock_or_retry(folio, vmf); 4862 if (ret & VM_FAULT_RETRY) 4863 goto out_release; 4864 4865 page = folio_file_page(folio, swp_offset(entry)); 4866 /* 4867 * Make sure folio_free_swap() or swapoff did not release the 4868 * swapcache from under us. The page pin, and pte_same test 4869 * below, are not enough to exclude that. Even if it is still 4870 * swapcache, we need to check that the page's swap has not 4871 * changed. 4872 */ 4873 if (unlikely(!folio_matches_swap_entry(folio, entry))) 4874 goto out_page; 4875 4876 if (unlikely(PageHWPoison(page))) { 4877 /* 4878 * hwpoisoned dirty swapcache pages are kept for killing 4879 * owner processes (which may be unknown at hwpoison time) 4880 */ 4881 ret = VM_FAULT_HWPOISON; 4882 goto out_page; 4883 } 4884 4885 /* 4886 * KSM sometimes has to copy on read faults, for example, if 4887 * folio->index of non-ksm folios would be nonlinear inside the 4888 * anon VMA -- the ksm flag is lost on actual swapout. 4889 */ 4890 folio = ksm_might_need_to_copy(folio, vma, vmf->address); 4891 if (unlikely(!folio)) { 4892 ret = VM_FAULT_OOM; 4893 folio = swapcache; 4894 goto out_page; 4895 } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { 4896 ret = VM_FAULT_HWPOISON; 4897 folio = swapcache; 4898 goto out_page; 4899 } else if (folio != swapcache) 4900 page = folio_page(folio, 0); 4901 4902 /* 4903 * If we want to map a page that's in the swapcache writable, we 4904 * have to detect via the refcount if we're really the exclusive 4905 * owner. Try removing the extra reference from the local LRU 4906 * caches if required. 4907 */ 4908 if ((vmf->flags & FAULT_FLAG_WRITE) && 4909 !folio_test_ksm(folio) && !folio_test_lru(folio)) 4910 lru_add_drain(); 4911 4912 folio_throttle_swaprate(folio, GFP_KERNEL); 4913 4914 /* 4915 * Back out if somebody else already faulted in this pte. 
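* The folio is locked at this point, so a changed PTE means a concurrent fault won the race; everything is undone via out_nomap.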
4916 */ 4917 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4918 &vmf->ptl); 4919 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) 4920 goto out_nomap; 4921 4922 if (unlikely(!folio_test_uptodate(folio))) { 4923 ret = VM_FAULT_SIGBUS; 4924 goto out_nomap; 4925 } 4926 4927 nr_pages = 1; 4928 page_idx = 0; 4929 address = vmf->address; 4930 ptep = vmf->pte; 4931 if (folio_test_large(folio) && folio_test_swapcache(folio)) { 4932 int nr = folio_nr_pages(folio); 4933 unsigned long idx = folio_page_idx(folio, page); 4934 unsigned long folio_start = address - idx * PAGE_SIZE; 4935 unsigned long folio_end = folio_start + nr * PAGE_SIZE; 4936 pte_t *folio_ptep; 4937 pte_t folio_pte; 4938 4939 if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) 4940 goto check_folio; 4941 if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end))) 4942 goto check_folio; 4943 4944 folio_ptep = vmf->pte - idx; 4945 folio_pte = ptep_get(folio_ptep); 4946 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || 4947 swap_pte_batch(folio_ptep, nr, folio_pte) != nr) 4948 goto check_folio; 4949 4950 page_idx = idx; 4951 address = folio_start; 4952 ptep = folio_ptep; 4953 nr_pages = nr; 4954 entry = folio->swap; 4955 page = &folio->page; 4956 } 4957 4958 check_folio: 4959 /* 4960 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte 4961 * must never point at an anonymous page in the swapcache that is 4962 * PG_anon_exclusive. Sanity check that this holds and especially that 4963 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity 4964 * check after taking the PT lock and making sure that nobody 4965 * concurrently faulted in this page and set PG_anon_exclusive. 4966 */ 4967 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio)); 4968 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page)); 4969 4970 /* 4971 * If a large folio already belongs to an anon mapping, then we 4972 * can just go on and map it partially. 4973 * If not, the large swapin check above has failed and the page table 4974 * has changed, so subpages might have been charged to the wrong cgroup, 4975 * or the folio might even belong to shmem. We have to free it and fall back. 4976 * Nothing should have touched it yet: both the anon and shmem paths check 4977 * that a large folio is fully applicable before using it. 4978 * 4979 * This will be removed once we unify folio allocation in the swap cache 4980 * layer, where allocation of a folio stabilizes the swap entries. 4981 */ 4982 if (!folio_test_anon(folio) && folio_test_large(folio) && 4983 nr_pages != folio_nr_pages(folio)) { 4984 if (!WARN_ON_ONCE(folio_test_dirty(folio))) 4985 swap_cache_del_folio(folio); 4986 goto out_nomap; 4987 } 4988 4989 /* 4990 * Check under PT lock (to protect against a concurrent fork() sharing 4991 * the swap entry) for certainly exclusive pages. 4992 */ 4993 if (!folio_test_ksm(folio)) { 4994 /* 4995 * The can_swapin_thp check above ensures all PTEs have the 4996 * same exclusiveness. Checking just one PTE is fine. 4997 */ 4998 exclusive = pte_swp_exclusive(vmf->orig_pte); 4999 if (exclusive) 5000 check_swap_exclusive(folio, entry, nr_pages); 5001 if (folio != swapcache) { 5002 /* 5003 * We have a fresh page that is not exposed to the 5004 * swapcache -> certainly exclusive.
5005 */ 5006 exclusive = true; 5007 } else if (exclusive && folio_test_writeback(folio) && 5008 data_race(si->flags & SWP_STABLE_WRITES)) { 5009 /* 5010 * This is tricky: not all swap backends support 5011 * concurrent page modifications while under writeback. 5012 * 5013 * So if we stumble over such a page in the swapcache 5014 * we must not set the page exclusive, otherwise we can 5015 * map it writable without further checks and modify it 5016 * while still under writeback. 5017 * 5018 * For these problematic swap backends, simply drop the 5019 * exclusive marker: this is perfectly fine as we start 5020 * writeback only if we fully unmapped the page and 5021 * there are no unexpected references on the page after 5022 * unmapping succeeded. After fully unmapped, no 5023 * further GUP references (FOLL_GET and FOLL_PIN) can 5024 * appear, so dropping the exclusive marker and mapping 5025 * it only R/O is fine. 5026 */ 5027 exclusive = false; 5028 } 5029 } 5030 5031 /* 5032 * Some architectures may have to restore extra metadata to the page 5033 * when reading from swap. This metadata may be indexed by swap entry 5034 * so this must be called before folio_put_swap(). 5035 */ 5036 arch_swap_restore(folio_swap(entry, folio), folio); 5037 5038 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); 5039 add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); 5040 pte = mk_pte(page, vma->vm_page_prot); 5041 if (pte_swp_soft_dirty(vmf->orig_pte)) 5042 pte = pte_mksoft_dirty(pte); 5043 if (pte_swp_uffd_wp(vmf->orig_pte)) 5044 pte = pte_mkuffd_wp(pte); 5045 5046 /* 5047 * Same logic as in do_wp_page(); however, optimize for pages that are 5048 * certainly not shared either because we just allocated them without 5049 * exposing them to the swapcache or because the swap entry indicates 5050 * exclusivity. 5051 */ 5052 if (!folio_test_ksm(folio) && 5053 (exclusive || folio_ref_count(folio) == 1)) { 5054 if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) && 5055 !pte_needs_soft_dirty_wp(vma, pte)) { 5056 pte = pte_mkwrite(pte, vma); 5057 if (vmf->flags & FAULT_FLAG_WRITE) { 5058 pte = pte_mkdirty(pte); 5059 vmf->flags &= ~FAULT_FLAG_WRITE; 5060 } 5061 } 5062 rmap_flags |= RMAP_EXCLUSIVE; 5063 } 5064 folio_ref_add(folio, nr_pages - 1); 5065 flush_icache_pages(vma, page, nr_pages); 5066 vmf->orig_pte = pte_advance_pfn(pte, page_idx); 5067 5068 /* ksm created a completely new copy */ 5069 if (unlikely(folio != swapcache)) { 5070 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); 5071 folio_add_lru_vma(folio, vma); 5072 folio_put_swap(swapcache, NULL); 5073 } else if (!folio_test_anon(folio)) { 5074 /* 5075 * We currently only expect !anon folios that are fully 5076 * mappable. See the comment after can_swapin_thp above. 5077 */ 5078 VM_WARN_ON_ONCE_FOLIO(folio_nr_pages(folio) != nr_pages, folio); 5079 VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio); 5080 folio_add_new_anon_rmap(folio, vma, address, rmap_flags); 5081 folio_put_swap(folio, NULL); 5082 } else { 5083 VM_WARN_ON_ONCE(nr_pages != 1 && nr_pages != folio_nr_pages(folio)); 5084 folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, 5085 rmap_flags); 5086 folio_put_swap(folio, nr_pages == 1 ? 
page : NULL); 5087 } 5088 5089 VM_BUG_ON(!folio_test_anon(folio) || 5090 (pte_write(pte) && !PageAnonExclusive(page))); 5091 set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); 5092 arch_do_swap_page_nr(vma->vm_mm, vma, address, 5093 pte, pte, nr_pages); 5094 5095 /* 5096 * Remove the swap entry and conditionally try to free up the swapcache. 5097 * Do it after mapping, so raced page faults will likely see the folio 5098 * in swap cache and wait on the folio lock. 5099 */ 5100 if (should_try_to_free_swap(si, folio, vma, nr_pages, vmf->flags)) 5101 folio_free_swap(folio); 5102 5103 folio_unlock(folio); 5104 if (unlikely(folio != swapcache)) { 5105 /* 5106 * Hold the lock to avoid the swap entry to be reused 5107 * until we take the PT lock for the pte_same() check 5108 * (to avoid false positives from pte_same). For 5109 * further safety release the lock after the folio_put_swap 5110 * so that the swap count won't change under a 5111 * parallel locked swapcache. 5112 */ 5113 folio_unlock(swapcache); 5114 folio_put(swapcache); 5115 } 5116 5117 if (vmf->flags & FAULT_FLAG_WRITE) { 5118 ret |= do_wp_page(vmf); 5119 if (ret & VM_FAULT_ERROR) 5120 ret &= VM_FAULT_ERROR; 5121 goto out; 5122 } 5123 5124 /* No need to invalidate - it was non-present before */ 5125 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); 5126 unlock: 5127 if (vmf->pte) 5128 pte_unmap_unlock(vmf->pte, vmf->ptl); 5129 out: 5130 if (si) 5131 put_swap_device(si); 5132 return ret; 5133 out_nomap: 5134 if (vmf->pte) 5135 pte_unmap_unlock(vmf->pte, vmf->ptl); 5136 out_page: 5137 if (folio_test_swapcache(folio)) 5138 folio_free_swap(folio); 5139 folio_unlock(folio); 5140 out_release: 5141 folio_put(folio); 5142 if (folio != swapcache) { 5143 folio_unlock(swapcache); 5144 folio_put(swapcache); 5145 } 5146 if (si) 5147 put_swap_device(si); 5148 return ret; 5149 } 5150 5151 static bool pte_range_none(pte_t *pte, int nr_pages) 5152 { 5153 int i; 5154 5155 for (i = 0; i < nr_pages; i++) { 5156 if (!pte_none(ptep_get_lockless(pte + i))) 5157 return false; 5158 } 5159 5160 return true; 5161 } 5162 5163 static struct folio *alloc_anon_folio(struct vm_fault *vmf) 5164 { 5165 struct vm_area_struct *vma = vmf->vma; 5166 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5167 unsigned long orders; 5168 struct folio *folio; 5169 unsigned long addr; 5170 pte_t *pte; 5171 gfp_t gfp; 5172 int order; 5173 5174 /* 5175 * If uffd is active for the vma we need per-page fault fidelity to 5176 * maintain the uffd semantics. 5177 */ 5178 if (unlikely(userfaultfd_armed(vma))) 5179 goto fallback; 5180 5181 /* 5182 * Get a list of all the (large) orders below PMD_ORDER that are enabled 5183 * for this vma. Then filter out the orders that can't be allocated over 5184 * the faulting address and still be fully contained in the vma. 5185 */ 5186 orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT, 5187 BIT(PMD_ORDER) - 1); 5188 orders = thp_vma_suitable_orders(vma, vmf->address, orders); 5189 5190 if (!orders) 5191 goto fallback; 5192 5193 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); 5194 if (!pte) 5195 return ERR_PTR(-EAGAIN); 5196 5197 /* 5198 * Find the highest order where the aligned range is completely 5199 * pte_none(). Note that all remaining orders will be completely 5200 * pte_none(). 
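* (Each smaller order's aligned range around the faulting address is contained in the larger one, so once a fully pte_none() range is found the remaining orders are covered as well.)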
5201 */ 5202 order = highest_order(orders); 5203 while (orders) { 5204 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 5205 if (pte_range_none(pte + pte_index(addr), 1 << order)) 5206 break; 5207 order = next_order(&orders, order); 5208 } 5209 5210 pte_unmap(pte); 5211 5212 if (!orders) 5213 goto fallback; 5214 5215 /* Try allocating the highest of the remaining orders. */ 5216 gfp = vma_thp_gfp_mask(vma); 5217 while (orders) { 5218 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); 5219 folio = vma_alloc_folio(gfp, order, vma, addr); 5220 if (folio) { 5221 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { 5222 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); 5223 folio_put(folio); 5224 goto next; 5225 } 5226 folio_throttle_swaprate(folio, gfp); 5227 /* 5228 * When a folio is not zeroed during allocation 5229 * (__GFP_ZERO not used) or user folios require special 5230 * handling, folio_zero_user() is used to make sure 5231 * that the page corresponding to the faulting address 5232 * will be hot in the cache after zeroing. 5233 */ 5234 if (user_alloc_needs_zeroing()) 5235 folio_zero_user(folio, vmf->address); 5236 return folio; 5237 } 5238 next: 5239 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK); 5240 order = next_order(&orders, order); 5241 } 5242 5243 fallback: 5244 #endif 5245 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); 5246 } 5247 5248 void map_anon_folio_pte_nopf(struct folio *folio, pte_t *pte, 5249 struct vm_area_struct *vma, unsigned long addr, 5250 bool uffd_wp) 5251 { 5252 const unsigned int nr_pages = folio_nr_pages(folio); 5253 pte_t entry = folio_mk_pte(folio, vma->vm_page_prot); 5254 5255 entry = pte_sw_mkyoung(entry); 5256 5257 if (vma->vm_flags & VM_WRITE) 5258 entry = pte_mkwrite(pte_mkdirty(entry), vma); 5259 if (uffd_wp) 5260 entry = pte_mkuffd_wp(entry); 5261 5262 folio_ref_add(folio, nr_pages - 1); 5263 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); 5264 folio_add_lru_vma(folio, vma); 5265 set_ptes(vma->vm_mm, addr, pte, entry, nr_pages); 5266 update_mmu_cache_range(NULL, vma, addr, pte, nr_pages); 5267 } 5268 5269 static void map_anon_folio_pte_pf(struct folio *folio, pte_t *pte, 5270 struct vm_area_struct *vma, unsigned long addr, bool uffd_wp) 5271 { 5272 const unsigned int order = folio_order(folio); 5273 5274 map_anon_folio_pte_nopf(folio, pte, vma, addr, uffd_wp); 5275 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1L << order); 5276 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC); 5277 } 5278 5279 /* 5280 * We enter with non-exclusive mmap_lock (to exclude vma changes, 5281 * but allow concurrent faults), and pte mapped but not yet locked. 5282 * We return with mmap_lock still held, but pte unmapped and unlocked. 5283 */ 5284 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) 5285 { 5286 struct vm_area_struct *vma = vmf->vma; 5287 unsigned long addr = vmf->address; 5288 struct folio *folio; 5289 vm_fault_t ret = 0; 5290 int nr_pages; 5291 pte_t entry; 5292 5293 /* File mapping without ->vm_ops ? */ 5294 if (vma->vm_flags & VM_SHARED) 5295 return VM_FAULT_SIGBUS; 5296 5297 /* 5298 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can 5299 * be distinguished from a transient failure of pte_offset_map(). 
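* pte_alloc() only fails when a page table cannot be allocated, whereas pte_offset_map() can also fail transiently, and that must not be reported as OOM.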
5300 */ 5301 if (pte_alloc(vma->vm_mm, vmf->pmd)) 5302 return VM_FAULT_OOM; 5303 5304 /* Use the zero-page for reads */ 5305 if (!(vmf->flags & FAULT_FLAG_WRITE) && 5306 !mm_forbids_zeropage(vma->vm_mm)) { 5307 entry = pte_mkspecial(pfn_pte(zero_pfn(vmf->address), 5308 vma->vm_page_prot)); 5309 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 5310 vmf->address, &vmf->ptl); 5311 if (!vmf->pte) 5312 goto unlock; 5313 if (vmf_pte_changed(vmf)) { 5314 update_mmu_tlb(vma, vmf->address, vmf->pte); 5315 goto unlock; 5316 } 5317 ret = check_stable_address_space(vma->vm_mm); 5318 if (ret) 5319 goto unlock; 5320 /* Deliver the page fault to userland, check inside PT lock */ 5321 if (userfaultfd_missing(vma)) { 5322 pte_unmap_unlock(vmf->pte, vmf->ptl); 5323 return handle_userfault(vmf, VM_UFFD_MISSING); 5324 } 5325 if (vmf_orig_pte_uffd_wp(vmf)) 5326 entry = pte_mkuffd_wp(entry); 5327 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); 5328 5329 /* No need to invalidate - it was non-present before */ 5330 update_mmu_cache(vma, addr, vmf->pte); 5331 goto unlock; 5332 } 5333 5334 /* Allocate our own private page. */ 5335 ret = vmf_anon_prepare(vmf); 5336 if (ret) 5337 return ret; 5338 /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */ 5339 folio = alloc_anon_folio(vmf); 5340 if (IS_ERR(folio)) 5341 return 0; 5342 if (!folio) 5343 goto oom; 5344 5345 nr_pages = folio_nr_pages(folio); 5346 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); 5347 5348 /* 5349 * The memory barrier inside __folio_mark_uptodate makes sure that 5350 * preceding stores to the page contents become visible before 5351 * the set_pte_at() write. 5352 */ 5353 __folio_mark_uptodate(folio); 5354 5355 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); 5356 if (!vmf->pte) 5357 goto release; 5358 if (nr_pages == 1 && vmf_pte_changed(vmf)) { 5359 update_mmu_tlb(vma, addr, vmf->pte); 5360 goto release; 5361 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { 5362 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); 5363 goto release; 5364 } 5365 5366 ret = check_stable_address_space(vma->vm_mm); 5367 if (ret) 5368 goto release; 5369 5370 /* Deliver the page fault to userland, check inside PT lock */ 5371 if (userfaultfd_missing(vma)) { 5372 pte_unmap_unlock(vmf->pte, vmf->ptl); 5373 folio_put(folio); 5374 return handle_userfault(vmf, VM_UFFD_MISSING); 5375 } 5376 map_anon_folio_pte_pf(folio, vmf->pte, vma, addr, 5377 vmf_orig_pte_uffd_wp(vmf)); 5378 unlock: 5379 if (vmf->pte) 5380 pte_unmap_unlock(vmf->pte, vmf->ptl); 5381 return ret; 5382 release: 5383 folio_put(folio); 5384 goto unlock; 5385 oom: 5386 return VM_FAULT_OOM; 5387 } 5388 5389 /* 5390 * The mmap_lock must have been held on entry, and may have been 5391 * released depending on flags and vma->vm_ops->fault() return value. 5392 * See filemap_fault() and __lock_page_retry(). 
5393 */ 5394 static vm_fault_t __do_fault(struct vm_fault *vmf) 5395 { 5396 struct vm_area_struct *vma = vmf->vma; 5397 struct folio *folio; 5398 vm_fault_t ret; 5399 5400 /* 5401 * Preallocate pte before we take page_lock because this might lead to 5402 * deadlocks for memcg reclaim which waits for pages under writeback: 5403 * lock_page(A) 5404 * SetPageWriteback(A) 5405 * unlock_page(A) 5406 * lock_page(B) 5407 * lock_page(B) 5408 * pte_alloc_one 5409 * shrink_folio_list 5410 * wait_on_page_writeback(A) 5411 * SetPageWriteback(B) 5412 * unlock_page(B) 5413 * # flush A, B to clear the writeback 5414 */ 5415 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { 5416 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 5417 if (!vmf->prealloc_pte) 5418 return VM_FAULT_OOM; 5419 } 5420 5421 ret = vma->vm_ops->fault(vmf); 5422 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 5423 VM_FAULT_DONE_COW))) 5424 return ret; 5425 5426 folio = page_folio(vmf->page); 5427 if (unlikely(PageHWPoison(vmf->page))) { 5428 vm_fault_t poisonret = VM_FAULT_HWPOISON; 5429 if (ret & VM_FAULT_LOCKED) { 5430 if (page_mapped(vmf->page)) 5431 unmap_mapping_folio(folio); 5432 /* Retry if a clean folio was removed from the cache. */ 5433 if (mapping_evict_folio(folio->mapping, folio)) 5434 poisonret = VM_FAULT_NOPAGE; 5435 folio_unlock(folio); 5436 } 5437 folio_put(folio); 5438 vmf->page = NULL; 5439 return poisonret; 5440 } 5441 5442 if (unlikely(!(ret & VM_FAULT_LOCKED))) 5443 folio_lock(folio); 5444 else 5445 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page); 5446 5447 return ret; 5448 } 5449 5450 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5451 static void deposit_prealloc_pte(struct vm_fault *vmf) 5452 { 5453 struct vm_area_struct *vma = vmf->vma; 5454 5455 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 5456 /* 5457 * We are going to consume the prealloc table, 5458 * count that as nr_ptes. 5459 */ 5460 mm_inc_nr_ptes(vma->vm_mm); 5461 vmf->prealloc_pte = NULL; 5462 } 5463 5464 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page) 5465 { 5466 struct vm_area_struct *vma = vmf->vma; 5467 bool write = vmf->flags & FAULT_FLAG_WRITE; 5468 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 5469 pmd_t entry; 5470 vm_fault_t ret = VM_FAULT_FALLBACK; 5471 5472 /* 5473 * It is too late to allocate a small folio, we already have a large 5474 * folio in the pagecache: especially s390 KVM cannot tolerate any 5475 * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any 5476 * PMD mappings if THPs are disabled. As we already have a THP, 5477 * behave as if we are forcing a collapse. 5478 */ 5479 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags, 5480 /* forced_collapse=*/ true)) 5481 return ret; 5482 5483 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) 5484 return ret; 5485 5486 if (!is_pmd_order(folio_order(folio))) 5487 return ret; 5488 page = &folio->page; 5489 5490 /* 5491 * Just backoff if any subpage of a THP is corrupted otherwise 5492 * the corrupted page may mapped by PMD silently to escape the 5493 * check. This kind of THP just can be PTE mapped. Access to 5494 * the corrupted subpage should trigger SIGBUS as expected. 5495 */ 5496 if (unlikely(folio_test_has_hwpoisoned(folio))) 5497 return ret; 5498 5499 /* 5500 * Archs like ppc64 need additional space to store information 5501 * related to pte entry. Use the preallocated table for that. 
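* The preallocated table is only deposited under the PMD lock below, once we know the PMD is still empty and the mapping will really be installed.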
5502 */ 5503 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { 5504 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 5505 if (!vmf->prealloc_pte) 5506 return VM_FAULT_OOM; 5507 } 5508 5509 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 5510 if (unlikely(!pmd_none(*vmf->pmd))) 5511 goto out; 5512 5513 flush_icache_pages(vma, page, HPAGE_PMD_NR); 5514 5515 entry = folio_mk_pmd(folio, vma->vm_page_prot); 5516 if (write) 5517 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 5518 5519 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); 5520 folio_add_file_rmap_pmd(folio, page, vma); 5521 5522 /* 5523 * deposit and withdraw with pmd lock held 5524 */ 5525 if (arch_needs_pgtable_deposit()) 5526 deposit_prealloc_pte(vmf); 5527 5528 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 5529 5530 update_mmu_cache_pmd(vma, haddr, vmf->pmd); 5531 5532 /* fault is handled */ 5533 ret = 0; 5534 count_vm_event(THP_FILE_MAPPED); 5535 out: 5536 spin_unlock(vmf->ptl); 5537 return ret; 5538 } 5539 #else 5540 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page) 5541 { 5542 return VM_FAULT_FALLBACK; 5543 } 5544 #endif 5545 5546 /** 5547 * set_pte_range - Set a range of PTEs to point to pages in a folio. 5548 * @vmf: Fault description. 5549 * @folio: The folio that contains @page. 5550 * @page: The first page to create a PTE for. 5551 * @nr: The number of PTEs to create. 5552 * @addr: The first address to create a PTE for. 5553 */ 5554 void set_pte_range(struct vm_fault *vmf, struct folio *folio, 5555 struct page *page, unsigned int nr, unsigned long addr) 5556 { 5557 struct vm_area_struct *vma = vmf->vma; 5558 bool write = vmf->flags & FAULT_FLAG_WRITE; 5559 bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE); 5560 pte_t entry; 5561 5562 flush_icache_pages(vma, page, nr); 5563 entry = mk_pte(page, vma->vm_page_prot); 5564 5565 if (prefault && arch_wants_old_prefaulted_pte()) 5566 entry = pte_mkold(entry); 5567 else 5568 entry = pte_sw_mkyoung(entry); 5569 5570 if (write) 5571 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 5572 else if (pte_write(entry) && folio_test_dirty(folio)) 5573 entry = pte_mkdirty(entry); 5574 if (unlikely(vmf_orig_pte_uffd_wp(vmf))) 5575 entry = pte_mkuffd_wp(entry); 5576 /* copy-on-write page */ 5577 if (write && !(vma->vm_flags & VM_SHARED)) { 5578 VM_BUG_ON_FOLIO(nr != 1, folio); 5579 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); 5580 folio_add_lru_vma(folio, vma); 5581 } else { 5582 folio_add_file_rmap_ptes(folio, page, nr, vma); 5583 } 5584 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); 5585 5586 /* no need to invalidate: a not-present page won't be cached */ 5587 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); 5588 } 5589 5590 static bool vmf_pte_changed(struct vm_fault *vmf) 5591 { 5592 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) 5593 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); 5594 5595 return !pte_none(ptep_get(vmf->pte)); 5596 } 5597 5598 /** 5599 * finish_fault - finish page fault once we have prepared the page to fault 5600 * 5601 * @vmf: structure describing the fault 5602 * 5603 * This function handles all that is needed to finish a page fault once the 5604 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for 5605 * given page, adds reverse page mapping, handles memcg charges and LRU 5606 * addition. 5607 * 5608 * The function expects the page to be locked and on success it consumes a 5609 * reference of a page being mapped (for the PTE which maps it). 
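 *
 * For reference, the read-fault path in this file pairs it roughly as
 *
 *	ret = __do_fault(vmf);
 *	ret |= finish_fault(vmf);
 *	folio_unlock(folio);
 *
 * with the folio reference dropped again only if the fault failed.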
5610 * 5611 * Return: %0 on success, %VM_FAULT_ code in case of error. 5612 */ 5613 vm_fault_t finish_fault(struct vm_fault *vmf) 5614 { 5615 struct vm_area_struct *vma = vmf->vma; 5616 struct page *page; 5617 struct folio *folio; 5618 vm_fault_t ret; 5619 bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && 5620 !(vma->vm_flags & VM_SHARED); 5621 int type, nr_pages; 5622 unsigned long addr; 5623 bool needs_fallback = false; 5624 5625 fallback: 5626 addr = vmf->address; 5627 5628 /* Did we COW the page? */ 5629 if (is_cow) 5630 page = vmf->cow_page; 5631 else 5632 page = vmf->page; 5633 5634 folio = page_folio(page); 5635 /* 5636 * check even for read faults because we might have lost our CoWed 5637 * page 5638 */ 5639 if (!(vma->vm_flags & VM_SHARED)) { 5640 ret = check_stable_address_space(vma->vm_mm); 5641 if (ret) 5642 return ret; 5643 } 5644 5645 if (!needs_fallback && vma->vm_file) { 5646 struct address_space *mapping = vma->vm_file->f_mapping; 5647 pgoff_t file_end; 5648 5649 file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 5650 5651 /* 5652 * Do not allow to map with PTEs beyond i_size and with PMD 5653 * across i_size to preserve SIGBUS semantics. 5654 * 5655 * Make an exception for shmem/tmpfs that for long time 5656 * intentionally mapped with PMDs across i_size. 5657 */ 5658 needs_fallback = !shmem_mapping(mapping) && 5659 file_end < folio_next_index(folio); 5660 } 5661 5662 if (pmd_none(*vmf->pmd)) { 5663 if (!needs_fallback && folio_test_pmd_mappable(folio)) { 5664 ret = do_set_pmd(vmf, folio, page); 5665 if (ret != VM_FAULT_FALLBACK) 5666 return ret; 5667 } 5668 5669 if (vmf->prealloc_pte) 5670 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); 5671 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) 5672 return VM_FAULT_OOM; 5673 } 5674 5675 nr_pages = folio_nr_pages(folio); 5676 5677 /* Using per-page fault to maintain the uffd semantics */ 5678 if (unlikely(userfaultfd_armed(vma)) || unlikely(needs_fallback)) { 5679 nr_pages = 1; 5680 } else if (nr_pages > 1) { 5681 pgoff_t idx = folio_page_idx(folio, page); 5682 /* The page offset of vmf->address within the VMA. */ 5683 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; 5684 /* The index of the entry in the pagetable for fault page. */ 5685 pgoff_t pte_off = pte_index(vmf->address); 5686 5687 /* 5688 * Fallback to per-page fault in case the folio size in page 5689 * cache beyond the VMA limits and PMD pagetable limits. 5690 */ 5691 if (unlikely(vma_off < idx || 5692 vma_off + (nr_pages - idx) > vma_pages(vma) || 5693 pte_off < idx || 5694 pte_off + (nr_pages - idx) > PTRS_PER_PTE)) { 5695 nr_pages = 1; 5696 } else { 5697 /* Now we can set mappings for the whole large folio. */ 5698 addr = vmf->address - idx * PAGE_SIZE; 5699 page = &folio->page; 5700 } 5701 } 5702 5703 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 5704 addr, &vmf->ptl); 5705 if (!vmf->pte) 5706 return VM_FAULT_NOPAGE; 5707 5708 /* Re-check under ptl */ 5709 if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) { 5710 update_mmu_tlb(vma, addr, vmf->pte); 5711 ret = VM_FAULT_NOPAGE; 5712 goto unlock; 5713 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { 5714 needs_fallback = true; 5715 pte_unmap_unlock(vmf->pte, vmf->ptl); 5716 goto fallback; 5717 } 5718 5719 folio_ref_add(folio, nr_pages - 1); 5720 set_pte_range(vmf, folio, page, nr_pages, addr); 5721 type = is_cow ? 
MM_ANONPAGES : mm_counter_file(folio); 5722 add_mm_counter(vma->vm_mm, type, nr_pages); 5723 ret = 0; 5724 5725 unlock: 5726 pte_unmap_unlock(vmf->pte, vmf->ptl); 5727 return ret; 5728 } 5729 5730 static unsigned long fault_around_pages __read_mostly = 5731 65536 >> PAGE_SHIFT; 5732 5733 #ifdef CONFIG_DEBUG_FS 5734 static int fault_around_bytes_get(void *data, u64 *val) 5735 { 5736 *val = fault_around_pages << PAGE_SHIFT; 5737 return 0; 5738 } 5739 5740 /* 5741 * fault_around_bytes must be rounded down to the nearest page order as it's 5742 * what do_fault_around() expects to see. 5743 */ 5744 static int fault_around_bytes_set(void *data, u64 val) 5745 { 5746 if (val / PAGE_SIZE > PTRS_PER_PTE) 5747 return -EINVAL; 5748 5749 /* 5750 * The minimum value is 1 page, however this results in no fault-around 5751 * at all. See should_fault_around(). 5752 */ 5753 val = max(val, PAGE_SIZE); 5754 fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT; 5755 5756 return 0; 5757 } 5758 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, 5759 fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); 5760 5761 static int __init fault_around_debugfs(void) 5762 { 5763 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, 5764 &fault_around_bytes_fops); 5765 return 0; 5766 } 5767 late_initcall(fault_around_debugfs); 5768 #endif 5769 5770 /* 5771 * do_fault_around() tries to map few pages around the fault address. The hope 5772 * is that the pages will be needed soon and this will lower the number of 5773 * faults to handle. 5774 * 5775 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's 5776 * not ready to be mapped: not up-to-date, locked, etc. 5777 * 5778 * This function doesn't cross VMA or page table boundaries, in order to call 5779 * map_pages() and acquire a PTE lock only once. 5780 * 5781 * fault_around_pages defines how many pages we'll try to map. 5782 * do_fault_around() expects it to be set to a power of two less than or equal 5783 * to PTRS_PER_PTE. 5784 * 5785 * The virtual address of the area that we map is naturally aligned to 5786 * fault_around_pages * PAGE_SIZE rounded down to the machine page size 5787 * (and therefore to page order). This way it's easier to guarantee 5788 * that we don't cross page table boundaries. 5789 */ 5790 static vm_fault_t do_fault_around(struct vm_fault *vmf) 5791 { 5792 pgoff_t nr_pages = READ_ONCE(fault_around_pages); 5793 pgoff_t pte_off = pte_index(vmf->address); 5794 /* The page offset of vmf->address within the VMA. */ 5795 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; 5796 pgoff_t from_pte, to_pte; 5797 vm_fault_t ret; 5798 5799 /* The PTE offset of the start address, clamped to the VMA. */ 5800 from_pte = max(ALIGN_DOWN(pte_off, nr_pages), 5801 pte_off - min(pte_off, vma_off)); 5802 5803 /* The PTE offset of the end address, clamped to the VMA and PTE. */ 5804 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE, 5805 pte_off + vma_pages(vmf->vma) - vma_off) - 1; 5806 5807 if (pmd_none(*vmf->pmd)) { 5808 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); 5809 if (!vmf->prealloc_pte) 5810 return VM_FAULT_OOM; 5811 } 5812 5813 rcu_read_lock(); 5814 ret = vmf->vma->vm_ops->map_pages(vmf, 5815 vmf->pgoff + from_pte - pte_off, 5816 vmf->pgoff + to_pte - pte_off); 5817 rcu_read_unlock(); 5818 5819 return ret; 5820 } 5821 5822 /* Return true if we should do read fault-around, false otherwise */ 5823 static inline bool should_fault_around(struct vm_fault *vmf) 5824 { 5825 /* No ->map_pages? 
No way to fault around... */ 5826 if (!vmf->vma->vm_ops->map_pages) 5827 return false; 5828 5829 if (uffd_disable_fault_around(vmf->vma)) 5830 return false; 5831 5832 /* A single page implies no faulting 'around' at all. */ 5833 return fault_around_pages > 1; 5834 } 5835 5836 static vm_fault_t do_read_fault(struct vm_fault *vmf) 5837 { 5838 vm_fault_t ret = 0; 5839 struct folio *folio; 5840 5841 /* 5842 * Let's call ->map_pages() first and use ->fault() as fallback 5843 * if page by the offset is not ready to be mapped (cold cache or 5844 * something). 5845 */ 5846 if (should_fault_around(vmf)) { 5847 ret = do_fault_around(vmf); 5848 if (ret) 5849 return ret; 5850 } 5851 5852 ret = vmf_can_call_fault(vmf); 5853 if (ret) 5854 return ret; 5855 5856 ret = __do_fault(vmf); 5857 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5858 return ret; 5859 5860 ret |= finish_fault(vmf); 5861 folio = page_folio(vmf->page); 5862 folio_unlock(folio); 5863 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5864 folio_put(folio); 5865 return ret; 5866 } 5867 5868 static vm_fault_t do_cow_fault(struct vm_fault *vmf) 5869 { 5870 struct vm_area_struct *vma = vmf->vma; 5871 struct folio *folio; 5872 vm_fault_t ret; 5873 5874 ret = vmf_can_call_fault(vmf); 5875 if (!ret) 5876 ret = vmf_anon_prepare(vmf); 5877 if (ret) 5878 return ret; 5879 5880 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); 5881 if (!folio) 5882 return VM_FAULT_OOM; 5883 5884 vmf->cow_page = &folio->page; 5885 5886 ret = __do_fault(vmf); 5887 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5888 goto uncharge_out; 5889 if (ret & VM_FAULT_DONE_COW) 5890 return ret; 5891 5892 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { 5893 ret = VM_FAULT_HWPOISON; 5894 goto unlock; 5895 } 5896 __folio_mark_uptodate(folio); 5897 5898 ret |= finish_fault(vmf); 5899 unlock: 5900 unlock_page(vmf->page); 5901 put_page(vmf->page); 5902 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5903 goto uncharge_out; 5904 return ret; 5905 uncharge_out: 5906 folio_put(folio); 5907 return ret; 5908 } 5909 5910 static vm_fault_t do_shared_fault(struct vm_fault *vmf) 5911 { 5912 struct vm_area_struct *vma = vmf->vma; 5913 vm_fault_t ret, tmp; 5914 struct folio *folio; 5915 5916 ret = vmf_can_call_fault(vmf); 5917 if (ret) 5918 return ret; 5919 5920 ret = __do_fault(vmf); 5921 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 5922 return ret; 5923 5924 folio = page_folio(vmf->page); 5925 5926 /* 5927 * Check if the backing address space wants to know that the page is 5928 * about to become writable 5929 */ 5930 if (vma->vm_ops->page_mkwrite) { 5931 folio_unlock(folio); 5932 tmp = do_page_mkwrite(vmf, folio); 5933 if (unlikely(!tmp || 5934 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 5935 folio_put(folio); 5936 return tmp; 5937 } 5938 } 5939 5940 ret |= finish_fault(vmf); 5941 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | 5942 VM_FAULT_RETRY))) { 5943 folio_unlock(folio); 5944 folio_put(folio); 5945 return ret; 5946 } 5947 5948 ret |= fault_dirty_shared_page(vmf); 5949 return ret; 5950 } 5951 5952 /* 5953 * We enter with non-exclusive mmap_lock (to exclude vma changes, 5954 * but allow concurrent faults). 5955 * The mmap_lock may have been released depending on flags and our 5956 * return value. See filemap_fault() and __folio_lock_or_retry(). 
5957 * If mmap_lock is released, vma may become invalid (for example 5958 * by other thread calling munmap()). 5959 */ 5960 static vm_fault_t do_fault(struct vm_fault *vmf) 5961 { 5962 struct vm_area_struct *vma = vmf->vma; 5963 struct mm_struct *vm_mm = vma->vm_mm; 5964 vm_fault_t ret; 5965 5966 /* 5967 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND 5968 */ 5969 if (!vma->vm_ops->fault) { 5970 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 5971 vmf->address, &vmf->ptl); 5972 if (unlikely(!vmf->pte)) 5973 ret = VM_FAULT_SIGBUS; 5974 else { 5975 /* 5976 * Make sure this is not a temporary clearing of pte 5977 * by holding ptl and checking again. A R/M/W update 5978 * of pte involves: take ptl, clearing the pte so that 5979 * we don't have concurrent modification by hardware 5980 * followed by an update. 5981 */ 5982 if (unlikely(pte_none(ptep_get(vmf->pte)))) 5983 ret = VM_FAULT_SIGBUS; 5984 else 5985 ret = VM_FAULT_NOPAGE; 5986 5987 pte_unmap_unlock(vmf->pte, vmf->ptl); 5988 } 5989 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) 5990 ret = do_read_fault(vmf); 5991 else if (!(vma->vm_flags & VM_SHARED)) 5992 ret = do_cow_fault(vmf); 5993 else 5994 ret = do_shared_fault(vmf); 5995 5996 /* preallocated pagetable is unused: free it */ 5997 if (vmf->prealloc_pte) { 5998 pte_free(vm_mm, vmf->prealloc_pte); 5999 vmf->prealloc_pte = NULL; 6000 } 6001 return ret; 6002 } 6003 6004 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, 6005 unsigned long addr, int *flags, 6006 bool writable, int *last_cpupid) 6007 { 6008 struct vm_area_struct *vma = vmf->vma; 6009 6010 /* 6011 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as 6012 * much anyway since they can be in shared cache state. This misses 6013 * the case where a mapping is writable but the process never writes 6014 * to it but pte_write gets cleared during protection updates and 6015 * pte_dirty has unpredictable behaviour between PTE scan updates, 6016 * background writeback, dirty balancing and application behaviour. 6017 */ 6018 if (!writable) 6019 *flags |= TNF_NO_GROUP; 6020 6021 /* 6022 * Flag if the folio is shared between multiple address spaces. This 6023 * is later used when determining whether to group tasks together 6024 */ 6025 if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) 6026 *flags |= TNF_SHARED; 6027 /* 6028 * For memory tiering mode, cpupid of slow memory page is used 6029 * to record page access time. So use default value. 
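 * In that case the field does not hold a valid cpupid, so report the
 * default (-1 & LAST_CPUPID_MASK) rather than folio_last_cpupid().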
6030 */ 6031 if (folio_use_access_time(folio)) 6032 *last_cpupid = (-1 & LAST_CPUPID_MASK); 6033 else 6034 *last_cpupid = folio_last_cpupid(folio); 6035 6036 /* Record the current PID accessing VMA */ 6037 vma_set_access_pid_bit(vma); 6038 6039 count_vm_numa_event(NUMA_HINT_FAULTS); 6040 #ifdef CONFIG_NUMA_BALANCING 6041 count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1); 6042 #endif 6043 if (folio_nid(folio) == numa_node_id()) { 6044 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 6045 *flags |= TNF_FAULT_LOCAL; 6046 } 6047 6048 return mpol_misplaced(folio, vmf, addr); 6049 } 6050 6051 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, 6052 unsigned long fault_addr, pte_t *fault_pte, 6053 bool writable) 6054 { 6055 pte_t pte, old_pte; 6056 6057 old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); 6058 pte = pte_modify(old_pte, vma->vm_page_prot); 6059 pte = pte_mkyoung(pte); 6060 if (writable) 6061 pte = pte_mkwrite(pte, vma); 6062 ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); 6063 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); 6064 } 6065 6066 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, 6067 struct folio *folio, pte_t fault_pte, 6068 bool ignore_writable, bool pte_write_upgrade) 6069 { 6070 int nr = pte_pfn(fault_pte) - folio_pfn(folio); 6071 unsigned long start, end, addr = vmf->address; 6072 unsigned long addr_start = addr - (nr << PAGE_SHIFT); 6073 unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE); 6074 pte_t *start_ptep; 6075 6076 /* Stay within the VMA and within the page table. */ 6077 start = max3(addr_start, pt_start, vma->vm_start); 6078 end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE, 6079 vma->vm_end); 6080 start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); 6081 6082 /* Restore all PTEs' mapping of the large folio */ 6083 for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) { 6084 pte_t ptent = ptep_get(start_ptep); 6085 bool writable = false; 6086 6087 if (!pte_present(ptent) || !pte_protnone(ptent)) 6088 continue; 6089 6090 if (pfn_folio(pte_pfn(ptent)) != folio) 6091 continue; 6092 6093 if (!ignore_writable) { 6094 ptent = pte_modify(ptent, vma->vm_page_prot); 6095 writable = pte_write(ptent); 6096 if (!writable && pte_write_upgrade && 6097 can_change_pte_writable(vma, addr, ptent)) 6098 writable = true; 6099 } 6100 6101 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); 6102 } 6103 } 6104 6105 static vm_fault_t do_numa_page(struct vm_fault *vmf) 6106 { 6107 struct vm_area_struct *vma = vmf->vma; 6108 struct folio *folio = NULL; 6109 int nid = NUMA_NO_NODE; 6110 bool writable = false, ignore_writable = false; 6111 bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); 6112 int last_cpupid; 6113 int target_nid; 6114 pte_t pte, old_pte; 6115 int flags = 0, nr_pages; 6116 6117 /* 6118 * The pte cannot be used safely until we verify, while holding the page 6119 * table lock, that its contents have not changed during fault handling. 6120 */ 6121 spin_lock(vmf->ptl); 6122 /* Read the live PTE from the page tables: */ 6123 old_pte = ptep_get(vmf->pte); 6124 6125 if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { 6126 pte_unmap_unlock(vmf->pte, vmf->ptl); 6127 return 0; 6128 } 6129 6130 pte = pte_modify(old_pte, vma->vm_page_prot); 6131 6132 /* 6133 * Detect now whether the PTE could be writable; this information 6134 * is only valid while holding the PT lock. 
6135 */ 6136 writable = pte_write(pte); 6137 if (!writable && pte_write_upgrade && 6138 can_change_pte_writable(vma, vmf->address, pte)) 6139 writable = true; 6140 6141 folio = vm_normal_folio(vma, vmf->address, pte); 6142 if (!folio || folio_is_zone_device(folio)) 6143 goto out_map; 6144 6145 nid = folio_nid(folio); 6146 nr_pages = folio_nr_pages(folio); 6147 6148 target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags, 6149 writable, &last_cpupid); 6150 if (target_nid == NUMA_NO_NODE) 6151 goto out_map; 6152 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { 6153 flags |= TNF_MIGRATE_FAIL; 6154 goto out_map; 6155 } 6156 /* The folio is isolated and isolation code holds a folio reference. */ 6157 pte_unmap_unlock(vmf->pte, vmf->ptl); 6158 writable = false; 6159 ignore_writable = true; 6160 6161 /* Migrate to the requested node */ 6162 if (!migrate_misplaced_folio(folio, target_nid)) { 6163 nid = target_nid; 6164 flags |= TNF_MIGRATED; 6165 task_numa_fault(last_cpupid, nid, nr_pages, flags); 6166 return 0; 6167 } 6168 6169 flags |= TNF_MIGRATE_FAIL; 6170 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 6171 vmf->address, &vmf->ptl); 6172 if (unlikely(!vmf->pte)) 6173 return 0; 6174 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { 6175 pte_unmap_unlock(vmf->pte, vmf->ptl); 6176 return 0; 6177 } 6178 out_map: 6179 /* 6180 * Make it present again, depending on how arch implements 6181 * non-accessible ptes, some can allow access by kernel mode. 6182 */ 6183 if (folio && folio_test_large(folio)) 6184 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, 6185 pte_write_upgrade); 6186 else 6187 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, 6188 writable); 6189 pte_unmap_unlock(vmf->pte, vmf->ptl); 6190 6191 if (nid != NUMA_NO_NODE) 6192 task_numa_fault(last_cpupid, nid, nr_pages, flags); 6193 return 0; 6194 } 6195 6196 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) 6197 { 6198 struct vm_area_struct *vma = vmf->vma; 6199 if (vma_is_anonymous(vma)) 6200 return do_huge_pmd_anonymous_page(vmf); 6201 if (vma->vm_ops->huge_fault) 6202 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); 6203 return VM_FAULT_FALLBACK; 6204 } 6205 6206 /* `inline' is required to avoid gcc 4.1.2 build error */ 6207 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) 6208 { 6209 struct vm_area_struct *vma = vmf->vma; 6210 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 6211 vm_fault_t ret; 6212 6213 if (vma_is_anonymous(vma)) { 6214 if (likely(!unshare) && 6215 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { 6216 if (userfaultfd_wp_async(vmf->vma)) 6217 goto split; 6218 return handle_userfault(vmf, VM_UFFD_WP); 6219 } 6220 return do_huge_pmd_wp_page(vmf); 6221 } 6222 6223 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 6224 if (vma->vm_ops->huge_fault) { 6225 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); 6226 if (!(ret & VM_FAULT_FALLBACK)) 6227 return ret; 6228 } 6229 } 6230 6231 split: 6232 /* COW or write-notify handled on pte level: split pmd. 
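 * Returning VM_FAULT_FALLBACK afterwards makes __handle_mm_fault()
 * retry via handle_pte_fault(), which now sees the split PMD.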
*/ 6233 __split_huge_pmd(vma, vmf->pmd, vmf->address, false); 6234 6235 return VM_FAULT_FALLBACK; 6236 } 6237 6238 static vm_fault_t create_huge_pud(struct vm_fault *vmf) 6239 { 6240 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 6241 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 6242 struct vm_area_struct *vma = vmf->vma; 6243 /* No support for anonymous transparent PUD pages yet */ 6244 if (vma_is_anonymous(vma)) 6245 return VM_FAULT_FALLBACK; 6246 if (vma->vm_ops->huge_fault) 6247 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); 6248 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 6249 return VM_FAULT_FALLBACK; 6250 } 6251 6252 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) 6253 { 6254 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 6255 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 6256 struct vm_area_struct *vma = vmf->vma; 6257 vm_fault_t ret; 6258 6259 /* No support for anonymous transparent PUD pages yet */ 6260 if (vma_is_anonymous(vma)) 6261 goto split; 6262 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { 6263 if (vma->vm_ops->huge_fault) { 6264 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); 6265 if (!(ret & VM_FAULT_FALLBACK)) 6266 return ret; 6267 } 6268 } 6269 split: 6270 /* COW or write-notify not handled on PUD level: split pud.*/ 6271 __split_huge_pud(vma, vmf->pud, vmf->address); 6272 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 6273 return VM_FAULT_FALLBACK; 6274 } 6275 6276 /* 6277 * The page faults may be spurious because of the racy access to the 6278 * page table. For example, a non-populated virtual page is accessed 6279 * on 2 CPUs simultaneously, thus the page faults are triggered on 6280 * both CPUs. However, it's possible that one CPU (say CPU A) cannot 6281 * find the reason for the page fault if the other CPU (say CPU B) has 6282 * changed the page table before the PTE is checked on CPU A. Most of 6283 * the time, the spurious page faults can be ignored safely. However, 6284 * if the page fault is for the write access, it's possible that a 6285 * stale read-only TLB entry exists in the local CPU and needs to be 6286 * flushed on some architectures. This is called the spurious page 6287 * fault fixing. 6288 * 6289 * Note: flush_tlb_fix_spurious_fault() is defined as flush_tlb_page() 6290 * by default and used as such on most architectures, while 6291 * flush_tlb_fix_spurious_fault_pmd() is defined as NOP by default and 6292 * used as such on most architectures. 6293 */ 6294 static void fix_spurious_fault(struct vm_fault *vmf, 6295 enum pgtable_level ptlevel) 6296 { 6297 /* Skip spurious TLB flush for retried page fault */ 6298 if (vmf->flags & FAULT_FLAG_TRIED) 6299 return; 6300 /* 6301 * This is needed only for protection faults but the arch code 6302 * is not yet telling us if this is a protection fault or not. 6303 * This still avoids useless tlb flushes for .text page faults 6304 * with threads. 6305 */ 6306 if (vmf->flags & FAULT_FLAG_WRITE) { 6307 if (ptlevel == PGTABLE_LEVEL_PTE) 6308 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, 6309 vmf->pte); 6310 else 6311 flush_tlb_fix_spurious_fault_pmd(vmf->vma, vmf->address, 6312 vmf->pmd); 6313 } 6314 } 6315 /* 6316 * These routines also need to handle stuff like marking pages dirty 6317 * and/or accessed for architectures that don't do it in hardware (most 6318 * RISC architectures). The early dirtying is also good on the i386. 
6319 * 6320 * There is also a hook called "update_mmu_cache()" that architectures 6321 * with external mmu caches can use to update those (ie the Sparc or 6322 * PowerPC hashed page tables that act as extended TLBs). 6323 * 6324 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow 6325 * concurrent faults). 6326 * 6327 * The mmap_lock may have been released depending on flags and our return value. 6328 * See filemap_fault() and __folio_lock_or_retry(). 6329 */ 6330 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) 6331 { 6332 pte_t entry; 6333 6334 if (unlikely(pmd_none(*vmf->pmd))) { 6335 /* 6336 * Leave __pte_alloc() until later: because vm_ops->fault may 6337 * want to allocate huge page, and if we expose page table 6338 * for an instant, it will be difficult to retract from 6339 * concurrent faults and from rmap lookups. 6340 */ 6341 vmf->pte = NULL; 6342 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; 6343 } else { 6344 pmd_t dummy_pmdval; 6345 6346 /* 6347 * A regular pmd is established and it can't morph into a huge 6348 * pmd by anon khugepaged, since that takes mmap_lock in write 6349 * mode; but shmem or file collapse to THP could still morph 6350 * it into a huge pmd: just retry later if so. 6351 * 6352 * Use the maywrite version to indicate that vmf->pte may be 6353 * modified, but since we will use pte_same() to detect the 6354 * change of the !pte_none() entry, there is no need to recheck 6355 * the pmdval. Here we choose to pass a dummy variable instead 6356 * of NULL, which helps new user think about why this place is 6357 * special. 6358 */ 6359 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd, 6360 vmf->address, &dummy_pmdval, 6361 &vmf->ptl); 6362 if (unlikely(!vmf->pte)) 6363 return 0; 6364 vmf->orig_pte = ptep_get_lockless(vmf->pte); 6365 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; 6366 6367 if (pte_none(vmf->orig_pte)) { 6368 pte_unmap(vmf->pte); 6369 vmf->pte = NULL; 6370 } 6371 } 6372 6373 if (!vmf->pte) 6374 return do_pte_missing(vmf); 6375 6376 if (!pte_present(vmf->orig_pte)) 6377 return do_swap_page(vmf); 6378 6379 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) 6380 return do_numa_page(vmf); 6381 6382 spin_lock(vmf->ptl); 6383 entry = vmf->orig_pte; 6384 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { 6385 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 6386 goto unlock; 6387 } 6388 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 6389 if (!pte_write(entry)) 6390 return do_wp_page(vmf); 6391 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) 6392 entry = pte_mkdirty(entry); 6393 } 6394 entry = pte_mkyoung(entry); 6395 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, 6396 vmf->flags & FAULT_FLAG_WRITE)) 6397 update_mmu_cache_range(vmf, vmf->vma, vmf->address, 6398 vmf->pte, 1); 6399 else 6400 fix_spurious_fault(vmf, PGTABLE_LEVEL_PTE); 6401 unlock: 6402 pte_unmap_unlock(vmf->pte, vmf->ptl); 6403 return 0; 6404 } 6405 6406 /* 6407 * On entry, we hold either the VMA lock or the mmap_lock 6408 * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in 6409 * the result, the mmap_lock is not held on exit. See filemap_fault() 6410 * and __folio_lock_or_retry(). 
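 *
 * Roughly, the walk below proceeds as
 *
 *	pgd_offset() -> p4d_alloc() -> pud_alloc() -> pmd_alloc()
 *		-> handle_pte_fault()
 *
 * with the huge-page paths (create_huge_pud()/create_huge_pmd(),
 * wp_huge_pud()/wp_huge_pmd(), do_huge_pmd_numa_page()) tried first when
 * the relevant level is empty or already maps a leaf entry.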
6411 */ 6412 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, 6413 unsigned long address, unsigned int flags) 6414 { 6415 struct vm_fault vmf = { 6416 .vma = vma, 6417 .address = address & PAGE_MASK, 6418 .real_address = address, 6419 .flags = flags, 6420 .pgoff = linear_page_index(vma, address), 6421 .gfp_mask = __get_fault_gfp_mask(vma), 6422 }; 6423 struct mm_struct *mm = vma->vm_mm; 6424 vm_flags_t vm_flags = vma->vm_flags; 6425 pgd_t *pgd; 6426 p4d_t *p4d; 6427 vm_fault_t ret; 6428 6429 pgd = pgd_offset(mm, address); 6430 p4d = p4d_alloc(mm, pgd, address); 6431 if (!p4d) 6432 return VM_FAULT_OOM; 6433 6434 vmf.pud = pud_alloc(mm, p4d, address); 6435 if (!vmf.pud) 6436 return VM_FAULT_OOM; 6437 retry_pud: 6438 if (pud_none(*vmf.pud) && 6439 thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PUD_ORDER)) { 6440 ret = create_huge_pud(&vmf); 6441 if (!(ret & VM_FAULT_FALLBACK)) 6442 return ret; 6443 } else { 6444 pud_t orig_pud = *vmf.pud; 6445 6446 barrier(); 6447 if (pud_trans_huge(orig_pud)) { 6448 6449 /* 6450 * TODO once we support anonymous PUDs: NUMA case and 6451 * FAULT_FLAG_UNSHARE handling. 6452 */ 6453 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { 6454 ret = wp_huge_pud(&vmf, orig_pud); 6455 if (!(ret & VM_FAULT_FALLBACK)) 6456 return ret; 6457 } else { 6458 huge_pud_set_accessed(&vmf, orig_pud); 6459 return 0; 6460 } 6461 } 6462 } 6463 6464 vmf.pmd = pmd_alloc(mm, vmf.pud, address); 6465 if (!vmf.pmd) 6466 return VM_FAULT_OOM; 6467 6468 /* Huge pud page fault raced with pmd_alloc? */ 6469 if (pud_trans_unstable(vmf.pud)) 6470 goto retry_pud; 6471 6472 if (pmd_none(*vmf.pmd) && 6473 thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) { 6474 ret = create_huge_pmd(&vmf); 6475 if (ret & VM_FAULT_FALLBACK) 6476 goto fallback; 6477 else 6478 return ret; 6479 } 6480 6481 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); 6482 if (pmd_none(vmf.orig_pmd)) 6483 goto fallback; 6484 6485 if (unlikely(!pmd_present(vmf.orig_pmd))) { 6486 if (pmd_is_device_private_entry(vmf.orig_pmd)) 6487 return do_huge_pmd_device_private(&vmf); 6488 6489 if (pmd_is_migration_entry(vmf.orig_pmd)) 6490 pmd_migration_entry_wait(mm, vmf.pmd); 6491 return 0; 6492 } 6493 if (pmd_trans_huge(vmf.orig_pmd)) { 6494 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) 6495 return do_huge_pmd_numa_page(&vmf); 6496 6497 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 6498 !pmd_write(vmf.orig_pmd)) { 6499 ret = wp_huge_pmd(&vmf); 6500 if (!(ret & VM_FAULT_FALLBACK)) 6501 return ret; 6502 } else { 6503 vmf.ptl = pmd_lock(mm, vmf.pmd); 6504 if (!huge_pmd_set_accessed(&vmf)) 6505 fix_spurious_fault(&vmf, PGTABLE_LEVEL_PMD); 6506 spin_unlock(vmf.ptl); 6507 return 0; 6508 } 6509 } 6510 6511 fallback: 6512 return handle_pte_fault(&vmf); 6513 } 6514 6515 /** 6516 * mm_account_fault - Do page fault accounting 6517 * @mm: mm from which memcg should be extracted. It can be NULL. 6518 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting 6519 * of perf event counters, but we'll still do the per-task accounting to 6520 * the task who triggered this page fault. 6521 * @address: the faulted address. 6522 * @flags: the fault flags. 6523 * @ret: the fault retcode. 6524 * 6525 * This will take care of most of the page fault accounting. Meanwhile, it 6526 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter 6527 * updates. 
However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should 6528 * still be in per-arch page fault handlers at the entry of page fault. 6529 */ 6530 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, 6531 unsigned long address, unsigned int flags, 6532 vm_fault_t ret) 6533 { 6534 bool major; 6535 6536 /* Incomplete faults will be accounted upon completion. */ 6537 if (ret & VM_FAULT_RETRY) 6538 return; 6539 6540 /* 6541 * To preserve the behavior of older kernels, PGFAULT counters record 6542 * both successful and failed faults, as opposed to perf counters, 6543 * which ignore failed cases. 6544 */ 6545 count_vm_event(PGFAULT); 6546 count_memcg_event_mm(mm, PGFAULT); 6547 6548 /* 6549 * Do not account for unsuccessful faults (e.g. when the address wasn't 6550 * valid). That includes arch_vma_access_permitted() failing before 6551 * reaching here. So this is not a "this many hardware page faults" 6552 * counter. We should use the hw profiling for that. 6553 */ 6554 if (ret & VM_FAULT_ERROR) 6555 return; 6556 6557 /* 6558 * We define the fault as a major fault when the final successful fault 6559 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't 6560 * handle it immediately previously). 6561 */ 6562 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); 6563 6564 if (major) 6565 current->maj_flt++; 6566 else 6567 current->min_flt++; 6568 6569 /* 6570 * If the fault is done for GUP, regs will be NULL. We only do the 6571 * accounting for the per thread fault counters who triggered the 6572 * fault, and we skip the perf event updates. 6573 */ 6574 if (!regs) 6575 return; 6576 6577 if (major) 6578 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); 6579 else 6580 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 6581 } 6582 6583 #ifdef CONFIG_LRU_GEN 6584 static void lru_gen_enter_fault(struct vm_area_struct *vma) 6585 { 6586 /* the LRU algorithm only applies to accesses with recency */ 6587 current->in_lru_fault = vma_has_recency(vma); 6588 } 6589 6590 static void lru_gen_exit_fault(void) 6591 { 6592 current->in_lru_fault = false; 6593 } 6594 #else 6595 static void lru_gen_enter_fault(struct vm_area_struct *vma) 6596 { 6597 } 6598 6599 static void lru_gen_exit_fault(void) 6600 { 6601 } 6602 #endif /* CONFIG_LRU_GEN */ 6603 6604 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, 6605 unsigned int *flags) 6606 { 6607 if (unlikely(*flags & FAULT_FLAG_UNSHARE)) { 6608 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE)) 6609 return VM_FAULT_SIGSEGV; 6610 /* 6611 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's 6612 * just treat it like an ordinary read-fault otherwise. 6613 */ 6614 if (!is_cow_mapping(vma->vm_flags)) 6615 *flags &= ~FAULT_FLAG_UNSHARE; 6616 } else if (*flags & FAULT_FLAG_WRITE) { 6617 /* Write faults on read-only mappings are impossible ... */ 6618 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) 6619 return VM_FAULT_SIGSEGV; 6620 /* ... and FOLL_FORCE only applies to COW mappings. */ 6621 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && 6622 !is_cow_mapping(vma->vm_flags))) 6623 return VM_FAULT_SIGSEGV; 6624 } 6625 #ifdef CONFIG_PER_VMA_LOCK 6626 /* 6627 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of 6628 * the assumption that lock is dropped on VM_FAULT_RETRY. 
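 * FAULT_FLAG_RETRY_NOWAIT asks the handler to return without releasing
 * the lock, which the per-VMA lock paths do not honour, hence the
 * WARN_ON_ONCE() below.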
6629 */ 6630 if (WARN_ON_ONCE((*flags & 6631 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) == 6632 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT))) 6633 return VM_FAULT_SIGSEGV; 6634 #endif 6635 6636 return 0; 6637 } 6638 6639 /* 6640 * By the time we get here, we already hold either the VMA lock or the 6641 * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which). 6642 * 6643 * The mmap_lock may have been released depending on flags and our 6644 * return value. See filemap_fault() and __folio_lock_or_retry(). 6645 */ 6646 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 6647 unsigned int flags, struct pt_regs *regs) 6648 { 6649 /* If the fault handler drops the mmap_lock, vma may be freed */ 6650 struct mm_struct *mm = vma->vm_mm; 6651 vm_fault_t ret; 6652 bool is_droppable; 6653 6654 __set_current_state(TASK_RUNNING); 6655 6656 ret = sanitize_fault_flags(vma, &flags); 6657 if (ret) 6658 goto out; 6659 6660 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, 6661 flags & FAULT_FLAG_INSTRUCTION, 6662 flags & FAULT_FLAG_REMOTE)) { 6663 ret = VM_FAULT_SIGSEGV; 6664 goto out; 6665 } 6666 6667 is_droppable = !!(vma->vm_flags & VM_DROPPABLE); 6668 6669 /* 6670 * Enable the memcg OOM handling for faults triggered in user 6671 * space. Kernel faults are handled more gracefully. 6672 */ 6673 if (flags & FAULT_FLAG_USER) 6674 mem_cgroup_enter_user_fault(); 6675 6676 lru_gen_enter_fault(vma); 6677 6678 if (unlikely(is_vm_hugetlb_page(vma))) 6679 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); 6680 else 6681 ret = __handle_mm_fault(vma, address, flags); 6682 6683 /* 6684 * Warning: It is no longer safe to dereference vma-> after this point, 6685 * because mmap_lock might have been dropped by __handle_mm_fault(), so 6686 * vma might be destroyed from underneath us. 6687 */ 6688 6689 lru_gen_exit_fault(); 6690 6691 /* If the mapping is droppable, then errors due to OOM aren't fatal. */ 6692 if (is_droppable) 6693 ret &= ~VM_FAULT_OOM; 6694 6695 if (flags & FAULT_FLAG_USER) { 6696 mem_cgroup_exit_user_fault(); 6697 /* 6698 * The task may have entered a memcg OOM situation but 6699 * if the allocation error was handled gracefully (no 6700 * VM_FAULT_OOM), there is no need to kill anything. 6701 * Just clean up the OOM state peacefully. 6702 */ 6703 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) 6704 mem_cgroup_oom_synchronize(false); 6705 } 6706 out: 6707 mm_account_fault(mm, regs, address, flags, ret); 6708 6709 return ret; 6710 } 6711 EXPORT_SYMBOL_GPL(handle_mm_fault); 6712 6713 #ifndef __PAGETABLE_P4D_FOLDED 6714 /* 6715 * Allocate p4d page table. 6716 * We've already handled the fast-path in-line. 6717 */ 6718 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 6719 { 6720 p4d_t *new = p4d_alloc_one(mm, address); 6721 if (!new) 6722 return -ENOMEM; 6723 6724 spin_lock(&mm->page_table_lock); 6725 if (pgd_present(*pgd)) { /* Another has populated it */ 6726 p4d_free(mm, new); 6727 } else { 6728 smp_wmb(); /* See comment in pmd_install() */ 6729 pgd_populate(mm, pgd, new); 6730 } 6731 spin_unlock(&mm->page_table_lock); 6732 return 0; 6733 } 6734 #endif /* __PAGETABLE_P4D_FOLDED */ 6735 6736 #ifndef __PAGETABLE_PUD_FOLDED 6737 /* 6738 * Allocate page upper directory. 6739 * We've already handled the fast-path in-line. 
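 *
 * "In-line" refers to the pud_alloc() wrapper, which only calls this
 * slow path after observing p4d_none(); the common case therefore never
 * takes page_table_lock.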
6740 */ 6741 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) 6742 { 6743 pud_t *new = pud_alloc_one(mm, address); 6744 if (!new) 6745 return -ENOMEM; 6746 6747 spin_lock(&mm->page_table_lock); 6748 if (!p4d_present(*p4d)) { 6749 mm_inc_nr_puds(mm); 6750 smp_wmb(); /* See comment in pmd_install() */ 6751 p4d_populate(mm, p4d, new); 6752 } else /* Another has populated it */ 6753 pud_free(mm, new); 6754 spin_unlock(&mm->page_table_lock); 6755 return 0; 6756 } 6757 #endif /* __PAGETABLE_PUD_FOLDED */ 6758 6759 #ifndef __PAGETABLE_PMD_FOLDED 6760 /* 6761 * Allocate page middle directory. 6762 * We've already handled the fast-path in-line. 6763 */ 6764 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 6765 { 6766 spinlock_t *ptl; 6767 pmd_t *new = pmd_alloc_one(mm, address); 6768 if (!new) 6769 return -ENOMEM; 6770 6771 ptl = pud_lock(mm, pud); 6772 if (!pud_present(*pud)) { 6773 mm_inc_nr_pmds(mm); 6774 smp_wmb(); /* See comment in pmd_install() */ 6775 pud_populate(mm, pud, new); 6776 } else { /* Another has populated it */ 6777 pmd_free(mm, new); 6778 } 6779 spin_unlock(ptl); 6780 return 0; 6781 } 6782 #endif /* __PAGETABLE_PMD_FOLDED */ 6783 6784 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args, 6785 spinlock_t *lock, pte_t *ptep, 6786 pgprot_t pgprot, unsigned long pfn_base, 6787 unsigned long addr_mask, bool writable, 6788 bool special) 6789 { 6790 args->lock = lock; 6791 args->ptep = ptep; 6792 args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT); 6793 args->addr_mask = addr_mask; 6794 args->pgprot = pgprot; 6795 args->writable = writable; 6796 args->special = special; 6797 } 6798 6799 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma) 6800 { 6801 #ifdef CONFIG_LOCKDEP 6802 struct file *file = vma->vm_file; 6803 struct address_space *mapping = file ? file->f_mapping : NULL; 6804 6805 if (mapping) 6806 lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) || 6807 lockdep_is_held(&vma->vm_mm->mmap_lock)); 6808 else 6809 lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock)); 6810 #endif 6811 } 6812 6813 /** 6814 * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address 6815 * @args: Pointer to struct @follow_pfnmap_args 6816 * 6817 * The caller needs to setup args->vma and args->address to point to the 6818 * virtual address as the target of such lookup. On a successful return, 6819 * the results will be put into other output fields. 6820 * 6821 * After the caller finished using the fields, the caller must invoke 6822 * another follow_pfnmap_end() to proper releases the locks and resources 6823 * of such look up request. 6824 * 6825 * During the start() and end() calls, the results in @args will be valid 6826 * as proper locks will be held. After the end() is called, all the fields 6827 * in @follow_pfnmap_args will be invalid to be further accessed. Further 6828 * use of such information after end() may require proper synchronizations 6829 * by the caller with page table updates, otherwise it can create a 6830 * security bug. 6831 * 6832 * If the PTE maps a refcounted page, callers are responsible to protect 6833 * against invalidation with MMU notifiers; otherwise access to the PFN at 6834 * a later point in time can trigger use-after-free. 6835 * 6836 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore 6837 * should be taken for read, and the mmap semaphore cannot be released 6838 * before the end() is invoked. 
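 *
 * A minimal usage sketch (illustrative; error handling trimmed, and the
 * required locking is the caller's responsibility):
 *
 *	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
 *	unsigned long pfn;
 *
 *	if (follow_pfnmap_start(&args))
 *		return -EINVAL;
 *	pfn = args.pfn;
 *	follow_pfnmap_end(&args);
 *
 * generic_access_phys() below is an in-tree user following this pattern.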
6839 * 6840 * This function must not be used to modify PTE content. 6841 * 6842 * Return: zero on success, negative otherwise. 6843 */ 6844 int follow_pfnmap_start(struct follow_pfnmap_args *args) 6845 { 6846 struct vm_area_struct *vma = args->vma; 6847 unsigned long address = args->address; 6848 struct mm_struct *mm = vma->vm_mm; 6849 spinlock_t *lock; 6850 pgd_t *pgdp; 6851 p4d_t *p4dp, p4d; 6852 pud_t *pudp, pud; 6853 pmd_t *pmdp, pmd; 6854 pte_t *ptep, pte; 6855 6856 pfnmap_lockdep_assert(vma); 6857 6858 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 6859 goto out; 6860 6861 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 6862 goto out; 6863 retry: 6864 pgdp = pgd_offset(mm, address); 6865 if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp))) 6866 goto out; 6867 6868 p4dp = p4d_offset(pgdp, address); 6869 p4d = p4dp_get(p4dp); 6870 if (p4d_none(p4d) || unlikely(p4d_bad(p4d))) 6871 goto out; 6872 6873 pudp = pud_offset(p4dp, address); 6874 pud = pudp_get(pudp); 6875 if (!pud_present(pud)) 6876 goto out; 6877 if (pud_leaf(pud)) { 6878 lock = pud_lock(mm, pudp); 6879 pud = pudp_get(pudp); 6880 6881 if (unlikely(!pud_present(pud))) { 6882 spin_unlock(lock); 6883 goto out; 6884 } else if (unlikely(!pud_leaf(pud))) { 6885 spin_unlock(lock); 6886 goto retry; 6887 } 6888 pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud), 6889 pud_pfn(pud), PUD_MASK, pud_write(pud), 6890 pud_special(pud)); 6891 return 0; 6892 } 6893 6894 pmdp = pmd_offset(pudp, address); 6895 pmd = pmdp_get_lockless(pmdp); 6896 if (!pmd_present(pmd)) 6897 goto out; 6898 if (pmd_leaf(pmd)) { 6899 lock = pmd_lock(mm, pmdp); 6900 pmd = pmdp_get(pmdp); 6901 6902 if (unlikely(!pmd_present(pmd))) { 6903 spin_unlock(lock); 6904 goto out; 6905 } else if (unlikely(!pmd_leaf(pmd))) { 6906 spin_unlock(lock); 6907 goto retry; 6908 } 6909 pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd), 6910 pmd_pfn(pmd), PMD_MASK, pmd_write(pmd), 6911 pmd_special(pmd)); 6912 return 0; 6913 } 6914 6915 ptep = pte_offset_map_lock(mm, pmdp, address, &lock); 6916 if (!ptep) 6917 goto out; 6918 pte = ptep_get(ptep); 6919 if (!pte_present(pte)) 6920 goto unlock; 6921 pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte), 6922 pte_pfn(pte), PAGE_MASK, pte_write(pte), 6923 pte_special(pte)); 6924 return 0; 6925 unlock: 6926 pte_unmap_unlock(ptep, lock); 6927 out: 6928 return -EINVAL; 6929 } 6930 EXPORT_SYMBOL_GPL(follow_pfnmap_start); 6931 6932 /** 6933 * follow_pfnmap_end(): End a follow_pfnmap_start() process 6934 * @args: Pointer to struct @follow_pfnmap_args 6935 * 6936 * Must be used in pair of follow_pfnmap_start(). See the start() function 6937 * above for more information. 6938 */ 6939 void follow_pfnmap_end(struct follow_pfnmap_args *args) 6940 { 6941 if (args->lock) 6942 spin_unlock(args->lock); 6943 if (args->ptep) 6944 pte_unmap(args->ptep); 6945 } 6946 EXPORT_SYMBOL_GPL(follow_pfnmap_end); 6947 6948 #ifdef CONFIG_HAVE_IOREMAP_PROT 6949 /** 6950 * generic_access_phys - generic implementation for iomem mmap access 6951 * @vma: the vma to access 6952 * @addr: userspace address, not relative offset within @vma 6953 * @buf: buffer to read/write 6954 * @len: length of transfer 6955 * @write: set to FOLL_WRITE when writing, otherwise reading 6956 * 6957 * This is a generic implementation for &vm_operations_struct.access for an 6958 * iomem mapping. This callback is used by access_process_vm() when the @vma is 6959 * not page based. 
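 *
 * A driver typically wires it up as (illustrative; "my_vm_ops" is a
 * made-up name):
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.access = generic_access_phys,
 *	};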
6960 */ 6961 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 6962 void *buf, int len, int write) 6963 { 6964 resource_size_t phys_addr; 6965 pgprot_t prot = __pgprot(0); 6966 void __iomem *maddr; 6967 int offset = offset_in_page(addr); 6968 int ret = -EINVAL; 6969 bool writable; 6970 struct follow_pfnmap_args args = { .vma = vma, .address = addr }; 6971 6972 retry: 6973 if (follow_pfnmap_start(&args)) 6974 return -EINVAL; 6975 prot = args.pgprot; 6976 phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT; 6977 writable = args.writable; 6978 follow_pfnmap_end(&args); 6979 6980 if ((write & FOLL_WRITE) && !writable) 6981 return -EINVAL; 6982 6983 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 6984 if (!maddr) 6985 return -ENOMEM; 6986 6987 if (follow_pfnmap_start(&args)) 6988 goto out_unmap; 6989 6990 if ((pgprot_val(prot) != pgprot_val(args.pgprot)) || 6991 (phys_addr != (args.pfn << PAGE_SHIFT)) || 6992 (writable != args.writable)) { 6993 follow_pfnmap_end(&args); 6994 iounmap(maddr); 6995 goto retry; 6996 } 6997 6998 if (write) 6999 memcpy_toio(maddr + offset, buf, len); 7000 else 7001 memcpy_fromio(buf, maddr + offset, len); 7002 ret = len; 7003 follow_pfnmap_end(&args); 7004 out_unmap: 7005 iounmap(maddr); 7006 7007 return ret; 7008 } 7009 EXPORT_SYMBOL_GPL(generic_access_phys); 7010 #endif 7011 7012 /* 7013 * Access another process' address space as given in mm. 7014 */ 7015 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, 7016 void *buf, int len, unsigned int gup_flags) 7017 { 7018 void *old_buf = buf; 7019 int write = gup_flags & FOLL_WRITE; 7020 7021 if (mmap_read_lock_killable(mm)) 7022 return 0; 7023 7024 /* Untag the address before looking up the VMA */ 7025 addr = untagged_addr_remote(mm, addr); 7026 7027 /* Avoid triggering the temporary warning in __get_user_pages */ 7028 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) 7029 return 0; 7030 7031 /* ignore errors, just check how much was successfully transferred */ 7032 while (len) { 7033 int bytes, offset; 7034 void *maddr; 7035 struct folio *folio; 7036 struct vm_area_struct *vma = NULL; 7037 struct page *page = get_user_page_vma_remote(mm, addr, 7038 gup_flags, &vma); 7039 7040 if (IS_ERR(page)) { 7041 /* We might need to expand the stack to access it */ 7042 vma = vma_lookup(mm, addr); 7043 if (!vma) { 7044 vma = expand_stack(mm, addr); 7045 7046 /* mmap_lock was dropped on failure */ 7047 if (!vma) 7048 return buf - old_buf; 7049 7050 /* Try again if stack expansion worked */ 7051 continue; 7052 } 7053 7054 /* 7055 * Check if this is a VM_IO | VM_PFNMAP VMA, which 7056 * we can access using slightly different code. 
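 * get_user_pages() refuses VM_IO/VM_PFNMAP mappings, so when the lookup
 * above fails we fall back to vma->vm_ops->access() (for example
 * generic_access_phys()) if the driver provides it.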
7057 */ 7058 bytes = 0; 7059 #ifdef CONFIG_HAVE_IOREMAP_PROT 7060 if (vma->vm_ops && vma->vm_ops->access) 7061 bytes = vma->vm_ops->access(vma, addr, buf, 7062 len, write); 7063 #endif 7064 if (bytes <= 0) 7065 break; 7066 } else { 7067 folio = page_folio(page); 7068 bytes = len; 7069 offset = addr & (PAGE_SIZE-1); 7070 if (bytes > PAGE_SIZE-offset) 7071 bytes = PAGE_SIZE-offset; 7072 7073 maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE); 7074 if (write) { 7075 copy_to_user_page(vma, page, addr, 7076 maddr + offset, buf, bytes); 7077 folio_mark_dirty_lock(folio); 7078 } else { 7079 copy_from_user_page(vma, page, addr, 7080 buf, maddr + offset, bytes); 7081 } 7082 folio_release_kmap(folio, maddr); 7083 } 7084 len -= bytes; 7085 buf += bytes; 7086 addr += bytes; 7087 } 7088 mmap_read_unlock(mm); 7089 7090 return buf - old_buf; 7091 } 7092 7093 /** 7094 * access_remote_vm - access another process' address space 7095 * @mm: the mm_struct of the target address space 7096 * @addr: start address to access 7097 * @buf: source or destination buffer 7098 * @len: number of bytes to transfer 7099 * @gup_flags: flags modifying lookup behaviour 7100 * 7101 * The caller must hold a reference on @mm. 7102 * 7103 * Return: number of bytes copied from source to destination. 7104 */ 7105 int access_remote_vm(struct mm_struct *mm, unsigned long addr, 7106 void *buf, int len, unsigned int gup_flags) 7107 { 7108 return __access_remote_vm(mm, addr, buf, len, gup_flags); 7109 } 7110 7111 /* 7112 * Access another process' address space. 7113 * Source/target buffer must be kernel space, 7114 * Do not walk the page table directly, use get_user_pages 7115 */ 7116 int access_process_vm(struct task_struct *tsk, unsigned long addr, 7117 void *buf, int len, unsigned int gup_flags) 7118 { 7119 struct mm_struct *mm; 7120 int ret; 7121 7122 mm = get_task_mm(tsk); 7123 if (!mm) 7124 return 0; 7125 7126 ret = __access_remote_vm(mm, addr, buf, len, gup_flags); 7127 7128 mmput(mm); 7129 7130 return ret; 7131 } 7132 EXPORT_SYMBOL_GPL(access_process_vm); 7133 7134 #ifdef CONFIG_BPF_SYSCALL 7135 /* 7136 * Copy a string from another process's address space as given in mm. 7137 * If there is any error return -EFAULT. 7138 */ 7139 static int __copy_remote_vm_str(struct mm_struct *mm, unsigned long addr, 7140 void *buf, int len, unsigned int gup_flags) 7141 { 7142 void *old_buf = buf; 7143 int err = 0; 7144 7145 *(char *)buf = '\0'; 7146 7147 if (mmap_read_lock_killable(mm)) 7148 return -EFAULT; 7149 7150 addr = untagged_addr_remote(mm, addr); 7151 7152 /* Avoid triggering the temporary warning in __get_user_pages */ 7153 if (!vma_lookup(mm, addr)) { 7154 err = -EFAULT; 7155 goto out; 7156 } 7157 7158 while (len) { 7159 int bytes, offset, retval; 7160 void *maddr; 7161 struct folio *folio; 7162 struct page *page; 7163 struct vm_area_struct *vma = NULL; 7164 7165 page = get_user_page_vma_remote(mm, addr, gup_flags, &vma); 7166 if (IS_ERR(page)) { 7167 /* 7168 * Treat as a total failure for now until we decide how 7169 * to handle the CONFIG_HAVE_IOREMAP_PROT case and 7170 * stack expansion. 
7171 */ 7172 *(char *)buf = '\0'; 7173 err = -EFAULT; 7174 goto out; 7175 } 7176 7177 folio = page_folio(page); 7178 bytes = len; 7179 offset = addr & (PAGE_SIZE - 1); 7180 if (bytes > PAGE_SIZE - offset) 7181 bytes = PAGE_SIZE - offset; 7182 7183 maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE); 7184 retval = strscpy(buf, maddr + offset, bytes); 7185 if (retval >= 0) { 7186 /* Found the end of the string */ 7187 buf += retval; 7188 folio_release_kmap(folio, maddr); 7189 break; 7190 } 7191 7192 buf += bytes - 1; 7193 /* 7194 * Because strscpy always NUL terminates we need to 7195 * copy the last byte in the page if we are going to 7196 * load more pages 7197 */ 7198 if (bytes != len) { 7199 addr += bytes - 1; 7200 copy_from_user_page(vma, page, addr, buf, maddr + (PAGE_SIZE - 1), 1); 7201 buf += 1; 7202 addr += 1; 7203 } 7204 len -= bytes; 7205 7206 folio_release_kmap(folio, maddr); 7207 } 7208 7209 out: 7210 mmap_read_unlock(mm); 7211 if (err) 7212 return err; 7213 return buf - old_buf; 7214 } 7215 7216 /** 7217 * copy_remote_vm_str - copy a string from another process's address space. 7218 * @tsk: the task of the target address space 7219 * @addr: start address to read from 7220 * @buf: destination buffer 7221 * @len: number of bytes to copy 7222 * @gup_flags: flags modifying lookup behaviour 7223 * 7224 * The caller must hold a reference on @mm. 7225 * 7226 * Return: number of bytes copied from @addr (source) to @buf (destination); 7227 * not including the trailing NUL. Always guaranteed to leave NUL-terminated 7228 * buffer. On any error, return -EFAULT. 7229 */ 7230 int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr, 7231 void *buf, int len, unsigned int gup_flags) 7232 { 7233 struct mm_struct *mm; 7234 int ret; 7235 7236 if (unlikely(len == 0)) 7237 return 0; 7238 7239 mm = get_task_mm(tsk); 7240 if (!mm) { 7241 *(char *)buf = '\0'; 7242 return -EFAULT; 7243 } 7244 7245 ret = __copy_remote_vm_str(mm, addr, buf, len, gup_flags); 7246 7247 mmput(mm); 7248 7249 return ret; 7250 } 7251 EXPORT_SYMBOL_GPL(copy_remote_vm_str); 7252 #endif /* CONFIG_BPF_SYSCALL */ 7253 7254 /* 7255 * Print the name of a VMA. 7256 */ 7257 void print_vma_addr(char *prefix, unsigned long ip) 7258 { 7259 struct mm_struct *mm = current->mm; 7260 struct vm_area_struct *vma; 7261 7262 /* 7263 * we might be running from an atomic context so we cannot sleep 7264 */ 7265 if (!mmap_read_trylock(mm)) 7266 return; 7267 7268 vma = vma_lookup(mm, ip); 7269 if (vma && vma->vm_file) { 7270 struct file *f = vma->vm_file; 7271 ip -= vma->vm_start; 7272 ip += vma->vm_pgoff << PAGE_SHIFT; 7273 printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip, 7274 vma->vm_start, 7275 vma->vm_end - vma->vm_start); 7276 } 7277 mmap_read_unlock(mm); 7278 } 7279 7280 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) 7281 void __might_fault(const char *file, int line) 7282 { 7283 if (pagefault_disabled()) 7284 return; 7285 __might_sleep(file, line); 7286 if (current->mm) 7287 might_lock_read(¤t->mm->mmap_lock); 7288 } 7289 EXPORT_SYMBOL(__might_fault); 7290 #endif 7291 7292 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 7293 /* 7294 * Process all subpages of the specified huge page with the specified 7295 * operation. The target subpage will be processed last to keep its 7296 * cache lines hot. 
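 *
 * For example (illustrative), with nr_pages = 8 and the target at index
 * 2, the processing order below is 7, 6, 5, 4, then 0, 3, 1, 2: distant
 * tail pages first, then pairs converging on the target, target last.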
7297 */ 7298 static inline int process_huge_page( 7299 unsigned long addr_hint, unsigned int nr_pages, 7300 int (*process_subpage)(unsigned long addr, int idx, void *arg), 7301 void *arg) 7302 { 7303 int i, n, base, l, ret; 7304 unsigned long addr = addr_hint & 7305 ~(((unsigned long)nr_pages << PAGE_SHIFT) - 1); 7306 7307 /* Process target subpage last to keep its cache lines hot */ 7308 might_sleep(); 7309 n = (addr_hint - addr) / PAGE_SIZE; 7310 if (2 * n <= nr_pages) { 7311 /* If target subpage in first half of huge page */ 7312 base = 0; 7313 l = n; 7314 /* Process subpages at the end of huge page */ 7315 for (i = nr_pages - 1; i >= 2 * n; i--) { 7316 cond_resched(); 7317 ret = process_subpage(addr + i * PAGE_SIZE, i, arg); 7318 if (ret) 7319 return ret; 7320 } 7321 } else { 7322 /* If target subpage in second half of huge page */ 7323 base = nr_pages - 2 * (nr_pages - n); 7324 l = nr_pages - n; 7325 /* Process subpages at the begin of huge page */ 7326 for (i = 0; i < base; i++) { 7327 cond_resched(); 7328 ret = process_subpage(addr + i * PAGE_SIZE, i, arg); 7329 if (ret) 7330 return ret; 7331 } 7332 } 7333 /* 7334 * Process remaining subpages in left-right-left-right pattern 7335 * towards the target subpage 7336 */ 7337 for (i = 0; i < l; i++) { 7338 int left_idx = base + i; 7339 int right_idx = base + 2 * l - 1 - i; 7340 7341 cond_resched(); 7342 ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg); 7343 if (ret) 7344 return ret; 7345 cond_resched(); 7346 ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg); 7347 if (ret) 7348 return ret; 7349 } 7350 return 0; 7351 } 7352 7353 static void clear_contig_highpages(struct page *page, unsigned long addr, 7354 unsigned int nr_pages) 7355 { 7356 unsigned int i, count; 7357 /* 7358 * When clearing we want to operate on the largest extent possible to 7359 * allow for architecture specific extent based optimizations. 7360 * 7361 * However, since clear_user_highpages() (and primitives clear_user_pages(), 7362 * clear_pages()), do not call cond_resched(), limit the unit size when 7363 * running under non-preemptible scheduling models. 7364 */ 7365 const unsigned int unit = preempt_model_preemptible() ? 7366 nr_pages : PROCESS_PAGES_NON_PREEMPT_BATCH; 7367 7368 might_sleep(); 7369 7370 for (i = 0; i < nr_pages; i += count) { 7371 cond_resched(); 7372 7373 count = min(unit, nr_pages - i); 7374 clear_user_highpages(page + i, addr + i * PAGE_SIZE, count); 7375 } 7376 } 7377 7378 /* 7379 * When zeroing a folio, we want to differentiate between pages in the 7380 * vicinity of the faulting address where we have spatial and temporal 7381 * locality, and those far away where we don't. 7382 * 7383 * Use a radius of 2 for determining the local neighbourhood. 7384 */ 7385 #define FOLIO_ZERO_LOCALITY_RADIUS 2 7386 7387 /** 7388 * folio_zero_user - Zero a folio which will be mapped to userspace. 7389 * @folio: The folio to zero. 7390 * @addr_hint: The address accessed by the user or the base address. 7391 */ 7392 void folio_zero_user(struct folio *folio, unsigned long addr_hint) 7393 { 7394 const unsigned long base_addr = ALIGN_DOWN(addr_hint, folio_size(folio)); 7395 const long fault_idx = (addr_hint - base_addr) / PAGE_SIZE; 7396 const struct range pg = DEFINE_RANGE(0, folio_nr_pages(folio) - 1); 7397 const long radius = FOLIO_ZERO_LOCALITY_RADIUS; 7398 struct range r[3]; 7399 int i; 7400 7401 /* 7402 * Faulting page and its immediate neighbourhood. Will be cleared at the 7403 * end to keep its cachelines hot. 
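 * For example (illustrative), a 16-page folio faulted at index 5 with
 * radius 2 is cleared as r[0] = [8, 15], then r[1] = [0, 2], and the
 * local neighbourhood r[2] = [3, 7] last.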
static void clear_contig_highpages(struct page *page, unsigned long addr,
				   unsigned int nr_pages)
{
	unsigned int i, count;
	/*
	 * When clearing we want to operate on the largest extent possible to
	 * allow for architecture-specific, extent-based optimizations.
	 *
	 * However, since clear_user_highpages() (and the underlying primitives
	 * clear_user_pages() and clear_pages()) does not call cond_resched(),
	 * limit the unit size when running under non-preemptible scheduling
	 * models.
	 */
	const unsigned int unit = preempt_model_preemptible() ?
				  nr_pages : PROCESS_PAGES_NON_PREEMPT_BATCH;

	might_sleep();

	for (i = 0; i < nr_pages; i += count) {
		cond_resched();

		count = min(unit, nr_pages - i);
		clear_user_highpages(page + i, addr + i * PAGE_SIZE, count);
	}
}

/*
 * When zeroing a folio, we want to differentiate between pages in the
 * vicinity of the faulting address, where we have spatial and temporal
 * locality, and those far away, where we don't.
 *
 * Use a radius of 2 for determining the local neighbourhood.
 */
#define FOLIO_ZERO_LOCALITY_RADIUS 2

/**
 * folio_zero_user - Zero a folio which will be mapped to userspace.
 * @folio: The folio to zero.
 * @addr_hint: The address accessed by the user or the base address.
 */
void folio_zero_user(struct folio *folio, unsigned long addr_hint)
{
	const unsigned long base_addr = ALIGN_DOWN(addr_hint, folio_size(folio));
	const long fault_idx = (addr_hint - base_addr) / PAGE_SIZE;
	const struct range pg = DEFINE_RANGE(0, folio_nr_pages(folio) - 1);
	const long radius = FOLIO_ZERO_LOCALITY_RADIUS;
	struct range r[3];
	int i;

	/*
	 * Faulting page and its immediate neighbourhood. Will be cleared at
	 * the end to keep its cachelines hot.
	 */
	r[2] = DEFINE_RANGE(fault_idx - radius < (long)pg.start ? pg.start : fault_idx - radius,
			    fault_idx + radius > (long)pg.end ? pg.end : fault_idx + radius);

	/* Region to the left of the fault */
	r[1] = DEFINE_RANGE(pg.start, r[2].start - 1);

	/* Region to the right of the fault: always valid for the common fault_idx == 0 case. */
	r[0] = DEFINE_RANGE(r[2].end + 1, pg.end);

	for (i = 0; i < ARRAY_SIZE(r); i++) {
		const unsigned long addr = base_addr + r[i].start * PAGE_SIZE;
		const long nr_pages = (long)range_len(&r[i]);
		struct page *page = folio_page(folio, r[i].start);

		if (nr_pages > 0)
			clear_contig_highpages(page, addr, nr_pages);
	}
}
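
/*
 * Example: for a 16-page folio with the fault at page index 6 and a radius
 * of 2, the ranges are r[2] = [4, 8] (fault neighbourhood), r[1] = [0, 3]
 * (left of it) and r[0] = [9, 15] (right of it). They are cleared in the
 * order r[0], r[1], r[2], so the pages around the faulting address are
 * zeroed last and stay cache-hot. Ranges that end up empty (e.g. r[1] for
 * a fault at index 0) have zero length and are skipped.
 */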
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
				   unsigned long addr_hint,
				   struct vm_area_struct *vma,
				   unsigned int nr_pages)
{
	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
	struct page *dst_page;
	struct page *src_page;
	int i;

	for (i = 0; i < nr_pages; i++) {
		dst_page = folio_page(dst, i);
		src_page = folio_page(src, i);

		cond_resched();
		if (copy_mc_user_highpage(dst_page, src_page,
					  addr + i * PAGE_SIZE, vma))
			return -EHWPOISON;
	}
	return 0;
}

struct copy_subpage_arg {
	struct folio *dst;
	struct folio *src;
	struct vm_area_struct *vma;
};

static int copy_subpage(unsigned long addr, int idx, void *arg)
{
	struct copy_subpage_arg *copy_arg = arg;
	struct page *dst = folio_page(copy_arg->dst, idx);
	struct page *src = folio_page(copy_arg->src, idx);

	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
		return -EHWPOISON;
	return 0;
}

int copy_user_large_folio(struct folio *dst, struct folio *src,
			  unsigned long addr_hint, struct vm_area_struct *vma)
{
	unsigned int nr_pages = folio_nr_pages(dst);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);

	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
}

/*
 * Returns the number of bytes that could not be copied (0 on success),
 * mirroring copy_from_user() semantics.
 */
long copy_folio_from_user(struct folio *dst_folio,
			  const void __user *usr_src,
			  bool allow_pagefault)
{
	void *kaddr;
	unsigned long i, rc = 0;
	unsigned int nr_pages = folio_nr_pages(dst_folio);
	unsigned long ret_val = nr_pages * PAGE_SIZE;
	struct page *subpage;

	for (i = 0; i < nr_pages; i++) {
		subpage = folio_page(dst_folio, i);
		kaddr = kmap_local_page(subpage);
		if (!allow_pagefault)
			pagefault_disable();
		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
		if (!allow_pagefault)
			pagefault_enable();
		kunmap_local(kaddr);

		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		flush_dcache_page(subpage);

		cond_resched();
	}
	return ret_val;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS

static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct ptdesc *ptdesc)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	ptdesc->ptl = ptl;
	return true;
}

void ptlock_free(struct ptdesc *ptdesc)
{
	if (ptdesc->ptl)
		kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
}
#endif

void vma_pgtable_walk_begin(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_lock_read(vma);
}

void vma_pgtable_walk_end(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_unlock_read(vma);
}
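
/*
 * Typical pairing (a sketch of the intended use, not taken from a specific
 * caller): a page table walker brackets its walk of a VMA so that hugetlb
 * VMAs are stabilised against concurrent PMD unsharing for its duration:
 *
 *	vma_pgtable_walk_begin(vma);
 *	... walk the page tables covered by vma ...
 *	vma_pgtable_walk_end(vma);
 */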