// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 *	Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 *	Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *	(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <linux/fsnotify.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);

/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
	if (!userfaultfd_wp(vmf->vma))
		return false;
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_marker_uffd_wp(vmf->orig_pte);
}

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif
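
/*
 * Illustrative override (editor's sketch, not from this file): an
 * architecture whose hardware updates the access flag cheaply could
 * provide its own definition in <asm/pgtable.h>, e.g.
 *
 *	#define arch_wants_old_prefaulted_pte	cpu_has_hw_af
 *
 * so that prefaulted entries start out 'old' and reclaim sees genuine
 * access information. cpu_has_hw_af stands here for an assumed arch
 * helper, not a reference to any particular port.
 */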

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);
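
/*
 * Usage note (editor's addition): booting with "norandmaps" on the
 * kernel command line is equivalent to setting the
 * kernel.randomize_va_space sysctl to 0 before the first task runs.
 */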

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
	trace_rss_stat(mm, member);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level? Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s? Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
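
	/*
	 * Worked example (editor's illustration, not from the original
	 * source): take ceiling == 0, i.e. "top of address space". Then
	 * ceiling - 1 is ~0UL, so "end - 1 > ceiling - 1" below can never
	 * trim end: nothing lies above the top. Conversely, if rounding
	 * addr down and bumping it by PMD_SIZE wraps it to 0, that 0 would
	 * read as "bottom" and confuse the later comparisons, hence the
	 * explicit "if (!addr) return".
	 */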

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE
	 * (see pte_free_tlb()), so flush the TLB at page
	 * granularity if we need to.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	struct unlink_vma_file_batch vb;

	do {
		unsigned long addr = vma->vm_start;
		struct vm_area_struct *next;

		/*
		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
		 * be 0. This will underflow and is okay.
		 */
		next = mas_find(mas, ceiling - 1);
		if (unlikely(xa_is_zero(next)))
			next = NULL;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		if (mm_wr_locked)
			vma_start_write(vma);
		unlink_anon_vmas(vma);

		if (is_vm_hugetlb_page(vma)) {
			unlink_file_vma(vma);
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			unlink_file_vma_batch_init(&vb);
			unlink_file_vma_batch_add(&vb, vma);

			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = mas_find(mas, ceiling - 1);
				if (unlikely(xa_is_zero(next)))
					next = NULL;
				if (mm_wr_locked)
					vma_start_write(vma);
				unlink_anon_vmas(vma);
				unlink_file_vma_batch_add(&vb, vma);
			}
			unlink_file_vma_batch_final(&vb);
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	} while (vma);
}

void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		/*
		 * Ensure all pte setup (eg. pte page lock and page clearing) are
		 * visible before the pte is made visible to other CPUs by being
		 * put into page tables.
		 *
		 * The other side of the story is the pointer chasing in the page
		 * table walking code (when walking the page table without locking;
		 * ie. most of the time). Fortunately, these data accesses consist
		 * of a chain of data-dependent loads, meaning most CPUs (alpha
		 * being the notable exception) will already guarantee loads are
		 * seen in-order. See the alpha page table accessors for the
		 * smp_rmb() barriers in page table walking code.
		 */
		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
		pmd_populate(mm, pmd, *pte);
		*pte = NULL;
	}
	spin_unlock(ptl);
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		smp_wmb(); /* See comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->read_folio : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The only exceptions are zeropages, which are
 * *never* refcounted.
 *
 * The disadvantage is that pages are refcounted (which can be slower and
 * simply not an option for some PFNMAP users). The advantage is that we
 * don't have to follow the strict linearity rule of PFNMAP mappings in
 * order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			/*
			 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
			 * and will have refcounts incremented on their struct pages
			 * when they are inserted into PTEs, thus they are safe to
			 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
			 * do not have refcounts. Example of legacy ZONE_DEVICE is
			 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
			 */
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			if (is_zero_pfn(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	VM_WARN_ON_ONCE(is_zero_pfn(pfn));
	return pfn_to_page(pfn);
}
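
/*
 * Illustrative sketch (editor's addition, not part of the kernel): the
 * linearity rule documented above can be written as a standalone
 * predicate. The helper name is hypothetical; it merely restates the
 * check that vm_normal_page() performs inline for raw VM_PFNMAP vmas
 * on architectures without pte_special().
 */
static inline bool __maybe_unused pfnmap_pte_is_special(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn)
{
	unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

	/* A pte that still honors the remap_pfn_range() layout is "special". */
	return pfn == vma->vm_pgoff + off;
}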

struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	if (page)
		return page_folio(page);
	return NULL;
}

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/* Currently it's only used for huge pfnmaps */
	if (unlikely(pmd_special(pmd)))
		return NULL;

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
				  unsigned long addr, pmd_t pmd)
{
	struct page *page = vm_normal_page_pmd(vma, addr, pmd);

	if (page)
		return page_folio(page);
	return NULL;
}
#endif

static void restore_exclusive_pte(struct vm_area_struct *vma,
				  struct page *page, unsigned long address,
				  pte_t *ptep)
{
	struct folio *folio = page_folio(page);
	pte_t orig_pte;
	pte_t pte;
	swp_entry_t entry;

	orig_pte = ptep_get(ptep);
	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(orig_pte))
		pte = pte_mksoft_dirty(pte);

	entry = pte_to_swp_entry(orig_pte);
	if (pte_swp_uffd_wp(orig_pte))
		pte = pte_mkuffd_wp(pte);
	else if (is_writable_device_exclusive_entry(entry))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	VM_BUG_ON_FOLIO(pte_write(pte) && !(folio_test_anon(folio) &&
					    PageAnonExclusive(page)), folio);

	/*
	 * No need to take a page reference as one was already
	 * created when the swap entry was made.
	 */
	if (folio_test_anon(folio))
		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
	else
		/*
		 * Currently device exclusive access only supports anonymous
		 * memory so the entry shouldn't point to a filebacked page.
		 */
		WARN_ON_ONCE(1);

	set_pte_at(vma->vm_mm, address, ptep, pte);

	/*
	 * No need to invalidate - it was non-present before. However
	 * secondary CPUs may have mappings that need invalidating.
	 */
	update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
			  unsigned long addr)
{
	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
	struct page *page = pfn_swap_entry_to_page(entry);

	if (trylock_page(page)) {
		restore_exclusive_pte(vma, page, addr, src_pte);
		unlock_page(page);
		return 0;
	}

	return -EBUSY;
}

/*
 * copy one vm_area from one task to the other. Assumes that the page
 * tables already present in the new task have been cleared in the whole
 * range covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	unsigned long vm_flags = dst_vma->vm_flags;
	pte_t orig_pte = ptep_get(src_pte);
	pte_t pte = orig_pte;
	struct folio *folio;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		/* Mark the swap entry as shared. */
		if (pte_swp_exclusive(orig_pte)) {
			pte = pte_swp_clear_exclusive(orig_pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		folio = pfn_swap_entry_folio(entry);

		rss[mm_counter(folio)]++;

		if (!is_readable_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both parent and child
			 * to be set to read-only. A previously exclusive entry
			 * is now shared.
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(orig_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);
		folio = page_folio(page);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		folio_get(folio);
		rss[mm_counter(folio)]++;
		/* Cannot fail as these pages cannot get pinned. */
		folio_try_dup_anon_rmap_pte(folio, page, src_vma);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
		return -ENOENT;
	} else if (is_pte_marker_entry(entry)) {
		pte_marker marker = copy_pte_marker(entry, dst_vma);

		if (marker)
			set_pte_at(dst_mm, addr, dst_pte,
				   make_pte_marker(marker));
		return 0;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct folio **prealloc, struct page *page)
{
	struct folio *new_folio;
	pte_t pte;

	new_folio = *prealloc;
	if (!new_folio)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good! Take it
	 * over and copy the page & arm it.
	 */

	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
		return -EHWPOISON;

	*prealloc = NULL;
	__folio_mark_uptodate(new_folio);
	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
	folio_add_lru_vma(new_folio, dst_vma);
	rss[MM_ANONPAGES]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_mkuffd_wp(pte);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
		pte_t pte, unsigned long addr, int nr)
{
	struct mm_struct *src_mm = src_vma->vm_mm;

	/* If it's a COW mapping, write protect it in both processes. */
	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
		wrprotect_ptes(src_mm, addr, src_pte, nr);
		pte = pte_wrprotect(pte);
	}

	/* If it's a shared mapping, mark it clean in the child. */
	if (src_vma->vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
}

/*
 * Copy one present PTE, trying to batch-process subsequent PTEs that map
 * consecutive pages of the same folio by copying them as well.
 *
 * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
 * Otherwise, returns the number of copied PTEs (at least 1).
 */
static inline int
copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
		  int max_nr, int *rss, struct folio **prealloc)
{
	struct page *page;
	struct folio *folio;
	bool any_writable;
	fpb_t flags = 0;
	int err, nr;

	page = vm_normal_page(src_vma, addr, pte);
	if (unlikely(!page))
		goto copy_pte;

	folio = page_folio(page);

	/*
	 * If we likely have to copy, just don't bother with batching. Make
	 * sure that the common "small folio" case is as fast as possible
	 * by keeping the batching logic separate.
	 */
	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
		if (src_vma->vm_flags & VM_SHARED)
			flags |= FPB_IGNORE_DIRTY;
		if (!vma_soft_dirty_enabled(src_vma))
			flags |= FPB_IGNORE_SOFT_DIRTY;

		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
				     &any_writable, NULL, NULL);
		folio_ref_add(folio, nr);
		if (folio_test_anon(folio)) {
			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
								  nr, src_vma))) {
				folio_ref_sub(folio, nr);
				return -EAGAIN;
			}
			rss[MM_ANONPAGES] += nr;
			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
		} else {
			folio_dup_file_rmap_ptes(folio, page, nr);
			rss[mm_counter_file(folio)] += nr;
		}
		if (any_writable)
			pte = pte_mkwrite(pte, src_vma);
		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
				    addr, nr);
		return nr;
	}

	folio_get(folio);
	if (folio_test_anon(folio)) {
		/*
		 * If this page may have been pinned by the parent process,
		 * copy the page immediately for the child so that we'll always
		 * guarantee the pinned page won't be randomly replaced in the
		 * future.
		 */
		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
			/* Page may be pinned, we have to copy. */
			folio_put(folio);
			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
						addr, rss, prealloc, page);
			return err ? err : 1;
		}
		rss[MM_ANONPAGES]++;
		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
	} else {
		folio_dup_file_rmap_pte(folio, page);
		rss[mm_counter_file(folio)]++;
	}

copy_pte:
	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
	return 1;
}

static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
	struct folio *new_folio;

	if (need_zero)
		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
	else
		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);

	if (!new_folio)
		return NULL;

	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		return NULL;
	}
	folio_throttle_swaprate(new_folio, GFP_KERNEL);

	return new_folio;
}

static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	pmd_t dummy_pmdval;
	pte_t ptent;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, max_nr, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct folio *prealloc = NULL;
	int nr;

again:
	progress = 0;
	init_rss_vec(rss);

	/*
	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
	 * error handling here, assume that exclusive mmap_lock on dst and src
	 * protects anon from unexpected THP transitions; with shmem and file
	 * protected by mmap_lock-less collapse skipping areas with anon_vma
	 * (whereas vma_needs_copy() skips areas without anon_vma). A rework
	 * can remove such assumptions later, but this is good enough for now.
	 */
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We already hold the exclusive mmap_lock; copy_pte_range() and
	 * retract_page_tables() use vma->anon_vma for exclusion, so the
	 * PTE page is stable and there is no need to get pmdval and do
	 * the pmd_same() check.
	 */
	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
					   &src_ptl);
	if (!src_pte) {
		pte_unmap_unlock(dst_pte, dst_ptl);
		/* ret == 0 */
		goto out;
	}
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		nr = 1;

		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		ptent = ptep_get(src_pte);
		if (pte_none(ptent)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(ptent))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
						  dst_pte, src_pte,
						  dst_vma, src_vma,
						  addr, rss);
			if (ret == -EIO) {
				entry = pte_to_swp_entry(ptep_get(src_pte));
				break;
			} else if (ret == -EBUSY) {
				break;
			} else if (!ret) {
				progress += 8;
				continue;
			}
			ptent = ptep_get(src_pte);
			VM_WARN_ON_ONCE(!pte_present(ptent));

			/*
			 * Device exclusive entry restored, continue by copying
			 * the now present pte.
			 */
			WARN_ON_ONCE(ret != -ENOENT);
		}
		/* copy_present_ptes() will clear `*prealloc' if consumed */
		max_nr = (end - addr) / PAGE_SIZE;
		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
					ptent, addr, max_nr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 * If copy failed due to hwpoison in source page, break out.
		 */
		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
			break;
		if (unlikely(prealloc)) {
			/*
			 * The preallocated folio cannot be reused next time
			 * round: mempolicy must be followed strictly (e.g.
			 * alloc_page_vma() allocates according to address),
			 * so it can only serve the pte it was allocated for.
			 * This can only happen if one pinned pte changed.
			 */
			folio_put(prealloc);
			prealloc = NULL;
		}
		nr = ret;
		progress += 8 * nr;
	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
		 addr != end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_src_pte, src_ptl);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
		goto out;
	} else if (ret == -EAGAIN) {
		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
		if (!prealloc)
			return -ENOMEM;
	} else if (ret < 0) {
		VM_WARN_ON_ONCE(1);
	}

	/* We've captured and resolved the error. Reset, try again. */
	ret = 0;

	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		folio_put(prealloc);
	return ret;
}

static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
					    addr, dst_vma, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork(). Return
 * false when we can speed up fork() by allowing lazy page faults later until
 * when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	/*
	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
	 * contains uffd-wp protection information, that's something we can't
	 * retrieve from page cache, and skipping the copy would lose that
	 * information.
	 */
	if (userfaultfd_wp(dst_vma))
		return true;

	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
		return true;

	if (src_vma->anon_vma)
		return true;

	/*
	 * Don't copy ptes where a page fault will fill them correctly. Fork
	 * becomes much lighter when there are big shared or private readonly
	 * mappings. The tradeoff is that copy_page_range is more efficient
	 * than faulting.
	 */
	return false;
}
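
/*
 * Concrete examples (editor's illustration): a MAP_SHARED file mapping
 * with uffd-wp disabled and no anon_vma is not copied here; the child
 * simply refaults its pages from the page cache. A MAP_PRIVATE mapping
 * that the parent has written to (so src_vma->anon_vma is set) must be
 * copied, or the child would lose sight of the parent's COWed anonymous
 * pages.
 */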
1361
1362 int
copy_page_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma)1363 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1364 {
1365 pgd_t *src_pgd, *dst_pgd;
1366 unsigned long next;
1367 unsigned long addr = src_vma->vm_start;
1368 unsigned long end = src_vma->vm_end;
1369 struct mm_struct *dst_mm = dst_vma->vm_mm;
1370 struct mm_struct *src_mm = src_vma->vm_mm;
1371 struct mmu_notifier_range range;
1372 bool is_cow;
1373 int ret;
1374
1375 if (!vma_needs_copy(dst_vma, src_vma))
1376 return 0;
1377
1378 if (is_vm_hugetlb_page(src_vma))
1379 return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1380
1381 if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1382 /*
1383 * We do not free on error cases below as remove_vma
1384 * gets called on error from higher level routine
1385 */
1386 ret = track_pfn_copy(src_vma);
1387 if (ret)
1388 return ret;
1389 }
1390
1391 /*
1392 * We need to invalidate the secondary MMU mappings only when
1393 * there could be a permission downgrade on the ptes of the
1394 * parent mm. And a permission downgrade will only happen if
1395 * is_cow_mapping() returns true.
1396 */
1397 is_cow = is_cow_mapping(src_vma->vm_flags);
1398
1399 if (is_cow) {
1400 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1401 0, src_mm, addr, end);
1402 mmu_notifier_invalidate_range_start(&range);
1403 /*
1404 * Disabling preemption is not needed for the write side, as
1405 * the read side doesn't spin, but goes to the mmap_lock.
1406 *
1407 * Use the raw variant of the seqcount_t write API to avoid
1408 * lockdep complaining about preemptibility.
1409 */
1410 vma_assert_write_locked(src_vma);
1411 raw_write_seqcount_begin(&src_mm->write_protect_seq);
1412 }
1413
1414 ret = 0;
1415 dst_pgd = pgd_offset(dst_mm, addr);
1416 src_pgd = pgd_offset(src_mm, addr);
1417 do {
1418 next = pgd_addr_end(addr, end);
1419 if (pgd_none_or_clear_bad(src_pgd))
1420 continue;
1421 if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1422 addr, next))) {
1423 untrack_pfn_clear(dst_vma);
1424 ret = -ENOMEM;
1425 break;
1426 }
1427 } while (dst_pgd++, src_pgd++, addr = next, addr != end);
1428
1429 if (is_cow) {
1430 raw_write_seqcount_end(&src_mm->write_protect_seq);
1431 mmu_notifier_invalidate_range_end(&range);
1432 }
1433 return ret;
1434 }
1435
1436 /* Whether we should zap all COWed (private) pages too */
should_zap_cows(struct zap_details * details)1437 static inline bool should_zap_cows(struct zap_details *details)
1438 {
1439 /* By default, zap all pages */
1440 if (!details || details->reclaim_pt)
1441 return true;
1442
1443 /* Or, we zap COWed pages only if the caller wants to */
1444 return details->even_cows;
1445 }
1446
1447 /* Decides whether we should zap this folio with the folio pointer specified */
should_zap_folio(struct zap_details * details,struct folio * folio)1448 static inline bool should_zap_folio(struct zap_details *details,
1449 struct folio *folio)
1450 {
1451 /* If we can make a decision without *folio.. */
1452 if (should_zap_cows(details))
1453 return true;
1454
1455 /* Otherwise we should only zap non-anon folios */
1456 return !folio_test_anon(folio);
1457 }
1458
zap_drop_markers(struct zap_details * details)1459 static inline bool zap_drop_markers(struct zap_details *details)
1460 {
1461 if (!details)
1462 return false;
1463
1464 return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1465 }
1466
1467 /*
1468 * This function makes sure that we'll replace the none pte with an uffd-wp
1469 * swap special pte marker when necessary. Must be with the pgtable lock held.
1470 *
1471 * Returns true if uffd-wp ptes was installed, false otherwise.
1472 */
1473 static inline bool
zap_install_uffd_wp_if_needed(struct vm_area_struct * vma,unsigned long addr,pte_t * pte,int nr,struct zap_details * details,pte_t pteval)1474 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1475 unsigned long addr, pte_t *pte, int nr,
1476 struct zap_details *details, pte_t pteval)
1477 {
1478 bool was_installed = false;
1479
1480 #ifdef CONFIG_PTE_MARKER_UFFD_WP
1481 /* Zap on anonymous always means dropping everything */
1482 if (vma_is_anonymous(vma))
1483 return false;
1484
1485 if (zap_drop_markers(details))
1486 return false;
1487
1488 for (;;) {
1489 /* the PFN in the PTE is irrelevant. */
1490 if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
1491 was_installed = true;
1492 if (--nr == 0)
1493 break;
1494 pte++;
1495 addr += PAGE_SIZE;
1496 }
1497 #endif
1498 return was_installed;
1499 }
1500
zap_present_folio_ptes(struct mmu_gather * tlb,struct vm_area_struct * vma,struct folio * folio,struct page * page,pte_t * pte,pte_t ptent,unsigned int nr,unsigned long addr,struct zap_details * details,int * rss,bool * force_flush,bool * force_break,bool * any_skipped)1501 static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
1502 struct vm_area_struct *vma, struct folio *folio,
1503 struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
1504 unsigned long addr, struct zap_details *details, int *rss,
1505 bool *force_flush, bool *force_break, bool *any_skipped)
1506 {
1507 struct mm_struct *mm = tlb->mm;
1508 bool delay_rmap = false;
1509
1510 if (!folio_test_anon(folio)) {
1511 ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1512 if (pte_dirty(ptent)) {
1513 folio_mark_dirty(folio);
1514 if (tlb_delay_rmap(tlb)) {
1515 delay_rmap = true;
1516 *force_flush = true;
1517 }
1518 }
1519 if (pte_young(ptent) && likely(vma_has_recency(vma)))
1520 folio_mark_accessed(folio);
1521 rss[mm_counter(folio)] -= nr;
1522 } else {
1523 /* We don't need up-to-date accessed/dirty bits. */
1524 clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1525 rss[MM_ANONPAGES] -= nr;
1526 }
1527 /* Checking a single PTE in a batch is sufficient. */
1528 arch_check_zapped_pte(vma, ptent);
1529 tlb_remove_tlb_entries(tlb, pte, nr, addr);
1530 if (unlikely(userfaultfd_pte_wp(vma, ptent)))
1531 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
1532 nr, details, ptent);
1533
1534 if (!delay_rmap) {
1535 folio_remove_rmap_ptes(folio, page, nr, vma);
1536
1537 if (unlikely(folio_mapcount(folio) < 0))
1538 print_bad_pte(vma, addr, ptent, page);
1539 }
1540 if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
1541 *force_flush = true;
1542 *force_break = true;
1543 }
1544 }
1545
1546 /*
1547 * Zap or skip at least one present PTE, trying to batch-process subsequent
1548 * PTEs that map consecutive pages of the same folio.
1549 *
1550 * Returns the number of processed (skipped or zapped) PTEs (at least 1).
1551 */
zap_present_ptes(struct mmu_gather * tlb,struct vm_area_struct * vma,pte_t * pte,pte_t ptent,unsigned int max_nr,unsigned long addr,struct zap_details * details,int * rss,bool * force_flush,bool * force_break,bool * any_skipped)1552 static inline int zap_present_ptes(struct mmu_gather *tlb,
1553 struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1554 unsigned int max_nr, unsigned long addr,
1555 struct zap_details *details, int *rss, bool *force_flush,
1556 bool *force_break, bool *any_skipped)
1557 {
1558 const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
1559 struct mm_struct *mm = tlb->mm;
1560 struct folio *folio;
1561 struct page *page;
1562 int nr;
1563
1564 page = vm_normal_page(vma, addr, ptent);
1565 if (!page) {
1566 /* We don't need up-to-date accessed/dirty bits. */
1567 ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
1568 arch_check_zapped_pte(vma, ptent);
1569 tlb_remove_tlb_entry(tlb, pte, addr);
1570 if (userfaultfd_pte_wp(vma, ptent))
1571 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
1572 pte, 1, details, ptent);
1573 ksm_might_unmap_zero_page(mm, ptent);
1574 return 1;
1575 }
1576
1577 folio = page_folio(page);
1578 if (unlikely(!should_zap_folio(details, folio))) {
1579 *any_skipped = true;
1580 return 1;
1581 }
1582
1583 /*
1584 * Make sure that the common "small folio" case is as fast as possible
1585 * by keeping the batching logic separate.
1586 */
1587 if (unlikely(folio_test_large(folio) && max_nr != 1)) {
1588 nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
1589 NULL, NULL, NULL);
1590
1591 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
1592 addr, details, rss, force_flush,
1593 force_break, any_skipped);
1594 return nr;
1595 }
1596 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
1597 details, rss, force_flush, force_break, any_skipped);
1598 return 1;
1599 }
1600
zap_nonpresent_ptes(struct mmu_gather * tlb,struct vm_area_struct * vma,pte_t * pte,pte_t ptent,unsigned int max_nr,unsigned long addr,struct zap_details * details,int * rss,bool * any_skipped)1601 static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
1602 struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1603 unsigned int max_nr, unsigned long addr,
1604 struct zap_details *details, int *rss, bool *any_skipped)
1605 {
1606 swp_entry_t entry;
1607 int nr = 1;
1608
1609 *any_skipped = true;
1610 entry = pte_to_swp_entry(ptent);
1611 if (is_device_private_entry(entry) ||
1612 is_device_exclusive_entry(entry)) {
1613 struct page *page = pfn_swap_entry_to_page(entry);
1614 struct folio *folio = page_folio(page);
1615
1616 if (unlikely(!should_zap_folio(details, folio)))
1617 return 1;
1618 /*
1619 * Both device private/exclusive mappings should only
1620 * work with anonymous page so far, so we don't need to
1621 * consider uffd-wp bit when zap. For more information,
1622 * see zap_install_uffd_wp_if_needed().
1623 */
1624 WARN_ON_ONCE(!vma_is_anonymous(vma));
1625 rss[mm_counter(folio)]--;
1626 if (is_device_private_entry(entry))
1627 folio_remove_rmap_pte(folio, page, vma);
1628 folio_put(folio);
1629 } else if (!non_swap_entry(entry)) {
1630 /* Genuine swap entries, hence a private anon pages */
1631 if (!should_zap_cows(details))
1632 return 1;
1633
1634 nr = swap_pte_batch(pte, max_nr, ptent);
1635 rss[MM_SWAPENTS] -= nr;
1636 free_swap_and_cache_nr(entry, nr);
1637 } else if (is_migration_entry(entry)) {
1638 struct folio *folio = pfn_swap_entry_folio(entry);
1639
1640 if (!should_zap_folio(details, folio))
1641 return 1;
1642 rss[mm_counter(folio)]--;
1643 } else if (pte_marker_entry_uffd_wp(entry)) {
1644 /*
1645 * For anon: always drop the marker; for file: only
1646 * drop the marker if explicitly requested.
1647 */
1648 if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
1649 return 1;
1650 } else if (is_guard_swp_entry(entry)) {
1651 /*
1652 * Ordinary zapping should not remove guard PTE
1653 * markers. Only do so if we should remove PTE markers
1654 * in general.
1655 */
1656 if (!zap_drop_markers(details))
1657 return 1;
1658 } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
1659 if (!should_zap_cows(details))
1660 return 1;
1661 } else {
1662 /* We should have covered all the swap entry types */
1663 pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
1664 WARN_ON_ONCE(1);
1665 }
1666 clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
1667 *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
1668
1669 return nr;
1670 }
1671
do_zap_pte_range(struct mmu_gather * tlb,struct vm_area_struct * vma,pte_t * pte,unsigned long addr,unsigned long end,struct zap_details * details,int * rss,bool * force_flush,bool * force_break,bool * any_skipped)1672 static inline int do_zap_pte_range(struct mmu_gather *tlb,
1673 struct vm_area_struct *vma, pte_t *pte,
1674 unsigned long addr, unsigned long end,
1675 struct zap_details *details, int *rss,
1676 bool *force_flush, bool *force_break,
1677 bool *any_skipped)
1678 {
1679 pte_t ptent = ptep_get(pte);
1680 int max_nr = (end - addr) / PAGE_SIZE;
1681 int nr = 0;
1682
1683 /* Skip all consecutive none ptes */
1684 if (pte_none(ptent)) {
1685 for (nr = 1; nr < max_nr; nr++) {
1686 ptent = ptep_get(pte + nr);
1687 if (!pte_none(ptent))
1688 break;
1689 }
1690 max_nr -= nr;
1691 if (!max_nr)
1692 return nr;
1693 pte += nr;
1694 addr += nr * PAGE_SIZE;
1695 }
1696
1697 if (pte_present(ptent))
1698 nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
1699 details, rss, force_flush, force_break,
1700 any_skipped);
1701 else
1702 nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
1703 details, rss, any_skipped);
1704
1705 return nr;
1706 }
1707
zap_pte_range(struct mmu_gather * tlb,struct vm_area_struct * vma,pmd_t * pmd,unsigned long addr,unsigned long end,struct zap_details * details)1708 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1709 struct vm_area_struct *vma, pmd_t *pmd,
1710 unsigned long addr, unsigned long end,
1711 struct zap_details *details)
1712 {
1713 bool force_flush = false, force_break = false;
1714 struct mm_struct *mm = tlb->mm;
1715 int rss[NR_MM_COUNTERS];
1716 spinlock_t *ptl;
1717 pte_t *start_pte;
1718 pte_t *pte;
1719 pmd_t pmdval;
1720 unsigned long start = addr;
1721 bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
1722 bool direct_reclaim = true;
1723 int nr;
1724
1725 retry:
1726 tlb_change_page_size(tlb, PAGE_SIZE);
1727 init_rss_vec(rss);
1728 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1729 if (!pte)
1730 return addr;
1731
1732 flush_tlb_batched_pending(mm);
1733 arch_enter_lazy_mmu_mode();
1734 do {
1735 bool any_skipped = false;
1736
1737 if (need_resched()) {
1738 direct_reclaim = false;
1739 break;
1740 }
1741
1742 nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
1743 &force_flush, &force_break, &any_skipped);
1744 if (any_skipped)
1745 can_reclaim_pt = false;
1746 if (unlikely(force_break)) {
1747 addr += nr * PAGE_SIZE;
1748 direct_reclaim = false;
1749 break;
1750 }
1751 } while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
1752
1753 /*
1754 * Fast path: try to hold the pmd lock and unmap the PTE page.
1755 *
1756 * If the pte lock was released midway (retry case), or if the attempt
1757 * to hold the pmd lock failed, then we need to recheck all pte entries
1758 * to ensure they are still none, thereby preventing the pte entries
1759 * from being repopulated by another thread.
1760 */
1761 if (can_reclaim_pt && direct_reclaim && addr == end)
1762 direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
1763
1764 add_mm_rss_vec(mm, rss);
1765 arch_leave_lazy_mmu_mode();
1766
1767 /* Do the actual TLB flush before dropping ptl */
1768 if (force_flush) {
1769 tlb_flush_mmu_tlbonly(tlb);
1770 tlb_flush_rmaps(tlb, vma);
1771 }
1772 pte_unmap_unlock(start_pte, ptl);
1773
1774 /*
1775 * If we forced a TLB flush (either due to running out of
1776 * batch buffers or because we needed to flush dirty TLB
1777 * entries before releasing the ptl), free the batched
1778 * memory too. Come back again if we didn't do everything.
1779 */
1780 if (force_flush)
1781 tlb_flush_mmu(tlb);
1782
1783 if (addr != end) {
1784 cond_resched();
1785 force_flush = false;
1786 force_break = false;
1787 goto retry;
1788 }
1789
1790 if (can_reclaim_pt) {
1791 if (direct_reclaim)
1792 free_pte(mm, start, tlb, pmdval);
1793 else
1794 try_to_free_pte(mm, pmd, start, tlb);
1795 }
1796
1797 return addr;
1798 }
1799
1800 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1801 struct vm_area_struct *vma, pud_t *pud,
1802 unsigned long addr, unsigned long end,
1803 struct zap_details *details)
1804 {
1805 pmd_t *pmd;
1806 unsigned long next;
1807
1808 pmd = pmd_offset(pud, addr);
1809 do {
1810 next = pmd_addr_end(addr, end);
1811 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1812 if (next - addr != HPAGE_PMD_SIZE)
1813 __split_huge_pmd(vma, pmd, addr, false, NULL);
1814 else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1815 addr = next;
1816 continue;
1817 }
1818 /* fall through */
1819 } else if (details && details->single_folio &&
1820 folio_test_pmd_mappable(details->single_folio) &&
1821 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1822 spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1823 /*
1824 * Take and drop THP pmd lock so that we cannot return
1825 * prematurely, while zap_huge_pmd() has cleared *pmd,
1826 * but not yet decremented compound_mapcount().
1827 */
1828 spin_unlock(ptl);
1829 }
1830 if (pmd_none(*pmd)) {
1831 addr = next;
1832 continue;
1833 }
1834 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1835 if (addr != next)
1836 pmd--;
1837 } while (pmd++, cond_resched(), addr != end);
1838
1839 return addr;
1840 }
1841
1842 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1843 struct vm_area_struct *vma, p4d_t *p4d,
1844 unsigned long addr, unsigned long end,
1845 struct zap_details *details)
1846 {
1847 pud_t *pud;
1848 unsigned long next;
1849
1850 pud = pud_offset(p4d, addr);
1851 do {
1852 next = pud_addr_end(addr, end);
1853 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1854 if (next - addr != HPAGE_PUD_SIZE) {
1855 mmap_assert_locked(tlb->mm);
1856 split_huge_pud(vma, pud, addr);
1857 } else if (zap_huge_pud(tlb, vma, pud, addr))
1858 goto next;
1859 /* fall through */
1860 }
1861 if (pud_none_or_clear_bad(pud))
1862 continue;
1863 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1864 next:
1865 cond_resched();
1866 } while (pud++, addr = next, addr != end);
1867
1868 return addr;
1869 }
1870
1871 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1872 struct vm_area_struct *vma, pgd_t *pgd,
1873 unsigned long addr, unsigned long end,
1874 struct zap_details *details)
1875 {
1876 p4d_t *p4d;
1877 unsigned long next;
1878
1879 p4d = p4d_offset(pgd, addr);
1880 do {
1881 next = p4d_addr_end(addr, end);
1882 if (p4d_none_or_clear_bad(p4d))
1883 continue;
1884 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1885 } while (p4d++, addr = next, addr != end);
1886
1887 return addr;
1888 }
1889
1890 void unmap_page_range(struct mmu_gather *tlb,
1891 struct vm_area_struct *vma,
1892 unsigned long addr, unsigned long end,
1893 struct zap_details *details)
1894 {
1895 pgd_t *pgd;
1896 unsigned long next;
1897
1898 BUG_ON(addr >= end);
1899 tlb_start_vma(tlb, vma);
1900 pgd = pgd_offset(vma->vm_mm, addr);
1901 do {
1902 next = pgd_addr_end(addr, end);
1903 if (pgd_none_or_clear_bad(pgd))
1904 continue;
1905 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1906 } while (pgd++, addr = next, addr != end);
1907 tlb_end_vma(tlb, vma);
1908 }
1909
1910
1911 static void unmap_single_vma(struct mmu_gather *tlb,
1912 struct vm_area_struct *vma, unsigned long start_addr,
1913 unsigned long end_addr,
1914 struct zap_details *details, bool mm_wr_locked)
1915 {
1916 unsigned long start = max(vma->vm_start, start_addr);
1917 unsigned long end;
1918
1919 if (start >= vma->vm_end)
1920 return;
1921 end = min(vma->vm_end, end_addr);
1922 if (end <= vma->vm_start)
1923 return;
1924
1925 if (vma->vm_file)
1926 uprobe_munmap(vma, start, end);
1927
1928 if (unlikely(vma->vm_flags & VM_PFNMAP))
1929 untrack_pfn(vma, 0, 0, mm_wr_locked);
1930
1931 if (start != end) {
1932 if (unlikely(is_vm_hugetlb_page(vma))) {
1933 /*
1934 * It is undesirable to test vma->vm_file as it
1935 * should be non-NULL for a valid hugetlb area.
1936 * However, vm_file will be NULL in the error
1937 * cleanup path of mmap_region. When the
1938 * hugetlbfs ->mmap method fails,
1939 * mmap_region() nullifies vma->vm_file
1940 * before calling this function to clean up.
1941 * Since no pte has actually been setup, it is
1942 * safe to do nothing in this case.
1943 */
1944 if (vma->vm_file) {
1945 zap_flags_t zap_flags = details ?
1946 details->zap_flags : 0;
1947 __unmap_hugepage_range(tlb, vma, start, end,
1948 NULL, zap_flags);
1949 }
1950 } else
1951 unmap_page_range(tlb, vma, start, end, details);
1952 }
1953 }
1954
1955 /**
1956 * unmap_vmas - unmap a range of memory covered by a list of vma's
1957 * @tlb: address of the caller's struct mmu_gather
1958 * @mas: the maple state
1959 * @vma: the starting vma
1960 * @start_addr: virtual address at which to start unmapping
1961 * @end_addr: virtual address at which to end unmapping
1962 * @tree_end: The maximum index to check
1963 * @mm_wr_locked: true if the mmap_lock is held for writing
1964 *
1965 * Unmap all pages in the vma list.
1966 *
1967 * Only addresses between @start_addr and @end_addr will be unmapped.
1968 *
1969 * The VMA list must be sorted in ascending virtual address order.
1970 *
1971 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1972 * range after unmap_vmas() returns. So the only responsibility here is to
1973 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1974 * drops the lock and schedules.
1975 */
1976 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1977 struct vm_area_struct *vma, unsigned long start_addr,
1978 unsigned long end_addr, unsigned long tree_end,
1979 bool mm_wr_locked)
1980 {
1981 struct mmu_notifier_range range;
1982 struct zap_details details = {
1983 .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
1984 /* Careful - we need to zap private pages too! */
1985 .even_cows = true,
1986 };
1987
1988 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1989 start_addr, end_addr);
1990 mmu_notifier_invalidate_range_start(&range);
1991 do {
1992 unsigned long start = start_addr;
1993 unsigned long end = end_addr;
1994 hugetlb_zap_begin(vma, &start, &end);
1995 unmap_single_vma(tlb, vma, start, end, &details,
1996 mm_wr_locked);
1997 hugetlb_zap_end(vma, &details);
1998 vma = mas_find(mas, tree_end - 1);
1999 } while (vma && likely(!xa_is_zero(vma)));
2000 mmu_notifier_invalidate_range_end(&range);
2001 }
2002
2003 /**
2004 * zap_page_range_single - remove user pages in a given range
2005 * @vma: vm_area_struct holding the applicable pages
2006 * @address: starting address of pages to zap
2007 * @size: number of bytes to zap
2008 * @details: details of shared cache invalidation
2009 *
2010 * The range must fit into one VMA.
2011 */
2012 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2013 unsigned long size, struct zap_details *details)
2014 {
2015 const unsigned long end = address + size;
2016 struct mmu_notifier_range range;
2017 struct mmu_gather tlb;
2018
2019 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2020 address, end);
2021 hugetlb_zap_begin(vma, &range.start, &range.end);
2022 tlb_gather_mmu(&tlb, vma->vm_mm);
2023 update_hiwater_rss(vma->vm_mm);
2024 mmu_notifier_invalidate_range_start(&range);
2025 /*
2026 * unmap 'address-end' not 'range.start-range.end' as range
2027 * could have been expanded for hugetlb pmd sharing.
2028 */
2029 unmap_single_vma(&tlb, vma, address, end, details, false);
2030 mmu_notifier_invalidate_range_end(&range);
2031 tlb_finish_mmu(&tlb);
2032 hugetlb_zap_end(vma, details);
2033 }
2034
2035 /**
2036 * zap_vma_ptes - remove ptes mapping the vma
2037 * @vma: vm_area_struct holding ptes to be zapped
2038 * @address: starting address of pages to zap
2039 * @size: number of bytes to zap
2040 *
2041 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
2042 *
2043 * The entire address range must be fully contained within the vma.
2044 *
2045 */
2046 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2047 unsigned long size)
2048 {
2049 if (!range_in_vma(vma, address, address + size) ||
2050 !(vma->vm_flags & VM_PFNMAP))
2051 return;
2052
2053 zap_page_range_single(vma, address, size, NULL);
2054 }
2055 EXPORT_SYMBOL_GPL(zap_vma_ptes);
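
/*
 * Illustrative sketch (not kernel code): a driver that previously
 * inserted PFNs into a VM_PFNMAP vma could revoke the user mapping
 * before reusing the backing resource; `struct my_dev` and its fields
 * are hypothetical.
 *
 *	static void mydrv_revoke_mapping(struct my_dev *dev)
 *	{
 *		struct vm_area_struct *vma = dev->vma;
 *
 *		// Drops only the PTEs; the vma itself stays in place.
 *		zap_vma_ptes(vma, vma->vm_start,
 *			     vma->vm_end - vma->vm_start);
 *	}
 */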
2056
2057 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
2058 {
2059 pgd_t *pgd;
2060 p4d_t *p4d;
2061 pud_t *pud;
2062 pmd_t *pmd;
2063
2064 pgd = pgd_offset(mm, addr);
2065 p4d = p4d_alloc(mm, pgd, addr);
2066 if (!p4d)
2067 return NULL;
2068 pud = pud_alloc(mm, p4d, addr);
2069 if (!pud)
2070 return NULL;
2071 pmd = pmd_alloc(mm, pud, addr);
2072 if (!pmd)
2073 return NULL;
2074
2075 VM_BUG_ON(pmd_trans_huge(*pmd));
2076 return pmd;
2077 }
2078
2079 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2080 spinlock_t **ptl)
2081 {
2082 pmd_t *pmd = walk_to_pmd(mm, addr);
2083
2084 if (!pmd)
2085 return NULL;
2086 return pte_alloc_map_lock(mm, pmd, addr, ptl);
2087 }
2088
2089 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
2090 {
2091 VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
2092 /*
2093 * Whoever wants to forbid the zeropage after some zeropages
2094 * might already have been mapped has to scan the page tables and
2095 * bail out on any zeropages. Zeropages in COW mappings can
2096 * be unshared using FAULT_FLAG_UNSHARE faults.
2097 */
2098 if (mm_forbids_zeropage(vma->vm_mm))
2099 return false;
2100 /* zeropages in COW mappings are common and unproblematic. */
2101 if (is_cow_mapping(vma->vm_flags))
2102 return true;
2103 /* Mappings that do not allow for writable PTEs are unproblematic. */
2104 if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
2105 return true;
2106 /*
2107 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
2108 * find the shared zeropage and longterm-pin it, which would
2109 * be problematic as soon as the zeropage gets replaced by a different
2110 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
2111 * now differ from what GUP looked up. FSDAX is incompatible with
2112 * FOLL_LONGTERM and VM_IO is incompatible with GUP completely (see
2113 * check_vma_flags).
2114 */
2115 return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
2116 (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
2117 }
2118
2119 static int validate_page_before_insert(struct vm_area_struct *vma,
2120 struct page *page)
2121 {
2122 struct folio *folio = page_folio(page);
2123
2124 if (!folio_ref_count(folio))
2125 return -EINVAL;
2126 if (unlikely(is_zero_folio(folio))) {
2127 if (!vm_mixed_zeropage_allowed(vma))
2128 return -EINVAL;
2129 return 0;
2130 }
2131 if (folio_test_anon(folio) || folio_test_slab(folio) ||
2132 page_has_type(page))
2133 return -EINVAL;
2134 flush_dcache_folio(folio);
2135 return 0;
2136 }
2137
2138 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
2139 unsigned long addr, struct page *page, pgprot_t prot)
2140 {
2141 struct folio *folio = page_folio(page);
2142 pte_t pteval;
2143
2144 if (!pte_none(ptep_get(pte)))
2145 return -EBUSY;
2146 /* Ok, finally just insert the thing.. */
2147 pteval = mk_pte(page, prot);
2148 if (unlikely(is_zero_folio(folio))) {
2149 pteval = pte_mkspecial(pteval);
2150 } else {
2151 folio_get(folio);
2152 inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
2153 folio_add_file_rmap_pte(folio, page, vma);
2154 }
2155 set_pte_at(vma->vm_mm, addr, pte, pteval);
2156 return 0;
2157 }
2158
2159 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2160 struct page *page, pgprot_t prot)
2161 {
2162 int retval;
2163 pte_t *pte;
2164 spinlock_t *ptl;
2165
2166 retval = validate_page_before_insert(vma, page);
2167 if (retval)
2168 goto out;
2169 retval = -ENOMEM;
2170 pte = get_locked_pte(vma->vm_mm, addr, &ptl);
2171 if (!pte)
2172 goto out;
2173 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
2174 pte_unmap_unlock(pte, ptl);
2175 out:
2176 return retval;
2177 }
2178
2179 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
2180 unsigned long addr, struct page *page, pgprot_t prot)
2181 {
2182 int err;
2183
2184 err = validate_page_before_insert(vma, page);
2185 if (err)
2186 return err;
2187 return insert_page_into_pte_locked(vma, pte, addr, page, prot);
2188 }
2189
2190 /* insert_pages() amortizes the cost of spinlock operations
2191 * when inserting pages in a loop.
2192 */
2193 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
2194 struct page **pages, unsigned long *num, pgprot_t prot)
2195 {
2196 pmd_t *pmd = NULL;
2197 pte_t *start_pte, *pte;
2198 spinlock_t *pte_lock;
2199 struct mm_struct *const mm = vma->vm_mm;
2200 unsigned long curr_page_idx = 0;
2201 unsigned long remaining_pages_total = *num;
2202 unsigned long pages_to_write_in_pmd;
2203 int ret;
2204 more:
2205 ret = -EFAULT;
2206 pmd = walk_to_pmd(mm, addr);
2207 if (!pmd)
2208 goto out;
2209
2210 pages_to_write_in_pmd = min_t(unsigned long,
2211 remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
2212
2213 /* Allocate the PTE if necessary; takes PMD lock once only. */
2214 ret = -ENOMEM;
2215 if (pte_alloc(mm, pmd))
2216 goto out;
2217
2218 while (pages_to_write_in_pmd) {
2219 int pte_idx = 0;
2220 const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
2221
2222 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2223 if (!start_pte) {
2224 ret = -EFAULT;
2225 goto out;
2226 }
2227 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
2228 int err = insert_page_in_batch_locked(vma, pte,
2229 addr, pages[curr_page_idx], prot);
2230 if (unlikely(err)) {
2231 pte_unmap_unlock(start_pte, pte_lock);
2232 ret = err;
2233 remaining_pages_total -= pte_idx;
2234 goto out;
2235 }
2236 addr += PAGE_SIZE;
2237 ++curr_page_idx;
2238 }
2239 pte_unmap_unlock(start_pte, pte_lock);
2240 pages_to_write_in_pmd -= batch_size;
2241 remaining_pages_total -= batch_size;
2242 }
2243 if (remaining_pages_total)
2244 goto more;
2245 ret = 0;
2246 out:
2247 *num = remaining_pages_total;
2248 return ret;
2249 }
2250
2251 /**
2252 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2253 * @vma: user vma to map to
2254 * @addr: target start user address of these pages
2255 * @pages: source kernel pages
2256 * @num: in: number of pages to map. out: number of pages that were *not*
2257 * mapped. (0 means all pages were successfully mapped).
2258 *
2259 * Preferred over vm_insert_page() when inserting multiple pages.
2260 *
2261 * In case of error, we may have mapped a subset of the provided
2262 * pages. It is the caller's responsibility to account for this case.
2263 *
2264 * The same restrictions apply as in vm_insert_page().
2265 */
2266 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2267 struct page **pages, unsigned long *num)
2268 {
2269 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
2270
2271 if (addr < vma->vm_start || end_addr >= vma->vm_end)
2272 return -EFAULT;
2273 if (!(vma->vm_flags & VM_MIXEDMAP)) {
2274 BUG_ON(mmap_read_trylock(vma->vm_mm));
2275 BUG_ON(vma->vm_flags & VM_PFNMAP);
2276 vm_flags_set(vma, VM_MIXEDMAP);
2277 }
2278 /* Defer page refcount checking till we're about to map that page. */
2279 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
2280 }
2281 EXPORT_SYMBOL(vm_insert_pages);
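
/*
 * Illustrative sketch: batch-mapping a preallocated page array and
 * reporting how many pages were left unmapped on error; `pages` and
 * `npages` are assumed to be provided by the caller.
 *
 *	unsigned long num = npages;
 *	int err = vm_insert_pages(vma, vma->vm_start, pages, &num);
 *
 *	if (err)
 *		pr_debug("%lu of %lu pages not mapped, err %d\n",
 *			 num, npages, err);
 */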
2282
2283 /**
2284 * vm_insert_page - insert single page into user vma
2285 * @vma: user vma to map to
2286 * @addr: target user address of this page
2287 * @page: source kernel page
2288 *
2289 * This allows drivers to insert individual pages they've allocated
2290 * into a user vma. The zeropage is supported in some VMAs,
2291 * see vm_mixed_zeropage_allowed().
2292 *
2293 * The page has to be a nice clean _individual_ kernel allocation.
2294 * If you allocate a compound page, you need to have marked it as
2295 * such (__GFP_COMP), or manually just split the page up yourself
2296 * (see split_page()).
2297 *
2298 * NOTE! Traditionally this was done with "remap_pfn_range()" which
2299 * took an arbitrary page protection parameter. This doesn't allow
2300 * that. Your vma protection will have to be set up correctly, which
2301 * means that if you want a shared writable mapping, you'd better
2302 * ask for a shared writable mapping!
2303 *
2304 * The page does not need to be reserved.
2305 *
2306 * Usually this function is called from f_op->mmap() handler
2307 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2308 * Caller must set VM_MIXEDMAP on vma if it wants to call this
2309 * function from other places, for example from page-fault handler.
2310 *
2311 * Return: %0 on success, negative error code otherwise.
2312 */
2313 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2314 struct page *page)
2315 {
2316 if (addr < vma->vm_start || addr >= vma->vm_end)
2317 return -EFAULT;
2318 if (!(vma->vm_flags & VM_MIXEDMAP)) {
2319 BUG_ON(mmap_read_trylock(vma->vm_mm));
2320 BUG_ON(vma->vm_flags & VM_PFNMAP);
2321 vm_flags_set(vma, VM_MIXEDMAP);
2322 }
2323 return insert_page(vma, addr, page, vma->vm_page_prot);
2324 }
2325 EXPORT_SYMBOL(vm_insert_page);
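
/*
 * Illustrative sketch (hypothetical driver): inserting one freshly
 * allocated page from an ->mmap() handler, where the mmap_lock is held
 * for write and vm_flags may still be changed. Error unwinding (freeing
 * the page if the insert fails) is elided for brevity.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *
 *		if (!page)
 *			return -ENOMEM;
 *		return vm_insert_page(vma, vma->vm_start, page);
 *	}
 */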
2326
2327 /*
2328 * __vm_map_pages - map a range of kernel pages into a user vma
2329 * @vma: user vma to map to
2330 * @pages: pointer to array of source kernel pages
2331 * @num: number of pages in page array
2332 * @offset: user's requested vm_pgoff
2333 *
2334 * This allows drivers to map a range of kernel pages into a user vma.
2335 * The zeropage is supported in some VMAs, see
2336 * vm_mixed_zeropage_allowed().
2337 *
2338 * Return: 0 on success and error code otherwise.
2339 */
2340 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2341 unsigned long num, unsigned long offset)
2342 {
2343 unsigned long count = vma_pages(vma);
2344 unsigned long uaddr = vma->vm_start;
2345 int ret, i;
2346
2347 /* Fail if the user requested offset is beyond the end of the object */
2348 if (offset >= num)
2349 return -ENXIO;
2350
2351 /* Fail if the user requested size exceeds available object size */
2352 if (count > num - offset)
2353 return -ENXIO;
2354
2355 for (i = 0; i < count; i++) {
2356 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2357 if (ret < 0)
2358 return ret;
2359 uaddr += PAGE_SIZE;
2360 }
2361
2362 return 0;
2363 }
2364
2365 /**
2366 * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2367 * @vma: user vma to map to
2368 * @pages: pointer to array of source kernel pages
2369 * @num: number of pages in page array
2370 *
2371 * Maps an object consisting of @num pages, catering for the user's
2372 * requested vm_pgoff
2373 *
2374 * If we fail to insert any page into the vma, the function will return
2375 * immediately leaving any previously inserted pages present. Callers
2376 * from the mmap handler may immediately return the error as their caller
2377 * will destroy the vma, removing any successfully inserted pages. Other
2378 * callers should make their own arrangements for calling unmap_region().
2379 *
2380 * Context: Process context. Called by mmap handlers.
2381 * Return: 0 on success and error code otherwise.
2382 */
2383 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2384 unsigned long num)
2385 {
2386 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2387 }
2388 EXPORT_SYMBOL(vm_map_pages);
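
/*
 * Illustrative sketch: a minimal mmap handler for a buffer already held
 * as a page array; vm_map_pages() applies the user's vm_pgoff itself.
 * `struct my_buffer` and its fields are hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_buffer *buf = file->private_data;
 *
 *		return vm_map_pages(vma, buf->pages, buf->page_count);
 *	}
 */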
2389
2390 /**
2391 * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2392 * @vma: user vma to map to
2393 * @pages: pointer to array of source kernel pages
2394 * @num: number of pages in page array
2395 *
2396 * Similar to vm_map_pages(), except that it explicitly sets the offset
2397 * to 0. This function is intended for drivers that do not consider
2398 * vm_pgoff.
2399 *
2400 * Context: Process context. Called by mmap handlers.
2401 * Return: 0 on success and error code otherwise.
2402 */
2403 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2404 unsigned long num)
2405 {
2406 return __vm_map_pages(vma, pages, num, 0);
2407 }
2408 EXPORT_SYMBOL(vm_map_pages_zero);
2409
2410 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2411 pfn_t pfn, pgprot_t prot, bool mkwrite)
2412 {
2413 struct mm_struct *mm = vma->vm_mm;
2414 pte_t *pte, entry;
2415 spinlock_t *ptl;
2416
2417 pte = get_locked_pte(mm, addr, &ptl);
2418 if (!pte)
2419 return VM_FAULT_OOM;
2420 entry = ptep_get(pte);
2421 if (!pte_none(entry)) {
2422 if (mkwrite) {
2423 /*
2424 * For read faults on private mappings the PFN passed
2425 * in may not match the PFN we have mapped if the
2426 * mapped PFN is a writeable COW page. In the mkwrite
2427 * case we are creating a writable PTE for a shared
2428 * mapping and we expect the PFNs to match. If they
2429 * don't match, we are likely racing with block
2430 * allocation and mapping invalidation so just skip the
2431 * update.
2432 */
2433 if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2434 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2435 goto out_unlock;
2436 }
2437 entry = pte_mkyoung(entry);
2438 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2439 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2440 update_mmu_cache(vma, addr, pte);
2441 }
2442 goto out_unlock;
2443 }
2444
2445 /* Ok, finally just insert the thing.. */
2446 if (pfn_t_devmap(pfn))
2447 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2448 else
2449 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2450
2451 if (mkwrite) {
2452 entry = pte_mkyoung(entry);
2453 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2454 }
2455
2456 set_pte_at(mm, addr, pte, entry);
2457 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2458
2459 out_unlock:
2460 pte_unmap_unlock(pte, ptl);
2461 return VM_FAULT_NOPAGE;
2462 }
2463
2464 /**
2465 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2466 * @vma: user vma to map to
2467 * @addr: target user address of this page
2468 * @pfn: source kernel pfn
2469 * @pgprot: pgprot flags for the inserted page
2470 *
2471 * This is exactly like vmf_insert_pfn(), except that it allows drivers
2472 * to override pgprot on a per-page basis.
2473 *
2474 * This only makes sense for IO mappings, and it makes no sense for
2475 * COW mappings. In general, using multiple vmas is preferable;
2476 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2477 * impractical.
2478 *
2479 * pgprot typically only differs from @vma->vm_page_prot when drivers set
2480 * caching- and encryption bits different from those of @vma->vm_page_prot,
2481 * because the caching- or encryption mode may not be known at mmap() time.
2482 *
2483 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2484 * to set caching and encryption bits for those vmas (except for COW pages).
2485 * This is ensured by core vm only modifying these page table entries using
2486 * functions that don't touch caching- or encryption bits, using pte_modify()
2487 * if needed. (See for example mprotect()).
2488 *
2489 * Also when new page-table entries are created, this is only done using the
2490 * fault() callback, and never using the value of vma->vm_page_prot,
2491 * except for page-table entries that point to anonymous pages as the result
2492 * of COW.
2493 *
2494 * Context: Process context. May allocate using %GFP_KERNEL.
2495 * Return: vm_fault_t value.
2496 */
2497 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2498 unsigned long pfn, pgprot_t pgprot)
2499 {
2500 /*
2501 * Technically, architectures with pte_special can avoid all these
2502 * restrictions (same for remap_pfn_range). However we would like
2503 * consistency in testing and feature parity among all, so we should
2504 * try to keep these invariants in place for everybody.
2505 */
2506 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2507 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2508 (VM_PFNMAP|VM_MIXEDMAP));
2509 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2510 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2511
2512 if (addr < vma->vm_start || addr >= vma->vm_end)
2513 return VM_FAULT_SIGBUS;
2514
2515 if (!pfn_modify_allowed(pfn, pgprot))
2516 return VM_FAULT_SIGBUS;
2517
2518 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2519
2520 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2521 false);
2522 }
2523 EXPORT_SYMBOL(vmf_insert_pfn_prot);
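
/*
 * Illustrative sketch: overriding the caching mode for a single page
 * from a fault handler, e.g. mapping a hypothetical framebuffer PFN
 * (`fb_pfn`) write-combined while vma->vm_page_prot itself is left
 * untouched:
 *
 *	return vmf_insert_pfn_prot(vmf->vma, vmf->address, fb_pfn,
 *			pgprot_writecombine(vmf->vma->vm_page_prot));
 */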
2524
2525 /**
2526 * vmf_insert_pfn - insert single pfn into user vma
2527 * @vma: user vma to map to
2528 * @addr: target user address of this page
2529 * @pfn: source kernel pfn
2530 *
2531 * Similar to vm_insert_page, this allows drivers to insert individual pages
2532 * they've allocated into a user vma. Same comments apply.
2533 *
2534 * This function should only be called from a vm_ops->fault handler, and
2535 * in that case the handler should return the result of this function.
2536 *
2537 * vma cannot be a COW mapping.
2538 *
2539 * As this is called only for pages that do not currently exist, we
2540 * do not need to flush old virtual caches or the TLB.
2541 *
2542 * Context: Process context. May allocate using %GFP_KERNEL.
2543 * Return: vm_fault_t value.
2544 */
2545 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2546 unsigned long pfn)
2547 {
2548 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2549 }
2550 EXPORT_SYMBOL(vmf_insert_pfn);
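
/*
 * Illustrative sketch: a vm_ops->fault handler for a VM_PFNMAP vma that
 * resolves the faulting offset to a device PFN; `struct my_dev` and its
 * `base_pfn` field are hypothetical.
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vmf->vma->vm_private_data;
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address,
 *				      dev->base_pfn + vmf->pgoff);
 *	}
 */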
2551
2552 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
2553 {
2554 if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
2555 (mkwrite || !vm_mixed_zeropage_allowed(vma)))
2556 return false;
2557 /* these checks mirror the abort conditions in vm_normal_page */
2558 if (vma->vm_flags & VM_MIXEDMAP)
2559 return true;
2560 if (pfn_t_devmap(pfn))
2561 return true;
2562 if (pfn_t_special(pfn))
2563 return true;
2564 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2565 return true;
2566 return false;
2567 }
2568
2569 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2570 unsigned long addr, pfn_t pfn, bool mkwrite)
2571 {
2572 pgprot_t pgprot = vma->vm_page_prot;
2573 int err;
2574
2575 if (!vm_mixed_ok(vma, pfn, mkwrite))
2576 return VM_FAULT_SIGBUS;
2577
2578 if (addr < vma->vm_start || addr >= vma->vm_end)
2579 return VM_FAULT_SIGBUS;
2580
2581 track_pfn_insert(vma, &pgprot, pfn);
2582
2583 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2584 return VM_FAULT_SIGBUS;
2585
2586 /*
2587 * If we don't have pte special, then we have to use the pfn_valid()
2588 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2589 * refcount the page if pfn_valid is true (hence insert_page rather
2590 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
2591 * without pte special, it would then be refcounted as a normal page.
2592 */
2593 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2594 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2595 struct page *page;
2596
2597 /*
2598 * At this point we are committed to insert_page()
2599 * regardless of whether the caller specified flags that
2600 * result in pfn_t_has_page() == false.
2601 */
2602 page = pfn_to_page(pfn_t_to_pfn(pfn));
2603 err = insert_page(vma, addr, page, pgprot);
2604 } else {
2605 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2606 }
2607
2608 if (err == -ENOMEM)
2609 return VM_FAULT_OOM;
2610 if (err < 0 && err != -EBUSY)
2611 return VM_FAULT_SIGBUS;
2612
2613 return VM_FAULT_NOPAGE;
2614 }
2615
2616 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2617 pfn_t pfn)
2618 {
2619 return __vm_insert_mixed(vma, addr, pfn, false);
2620 }
2621 EXPORT_SYMBOL(vmf_insert_mixed);
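
/*
 * Illustrative sketch: a fault handler for a VM_MIXEDMAP vma in which
 * some offsets resolve to refcounted pages and others to raw PFNs;
 * `mydrv_lookup_pfn()` is a hypothetical helper returning a pfn_t.
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		pfn_t pfn = mydrv_lookup_pfn(vmf->vma, vmf->pgoff);
 *
 *		return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
 *	}
 */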
2622
2623 /*
2624 * If the insertion of the PTE failed because someone else already added a
2625 * different entry in the meantime, we treat that as success as we assume
2626 * the same entry was actually inserted.
2627 */
2628 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2629 unsigned long addr, pfn_t pfn)
2630 {
2631 return __vm_insert_mixed(vma, addr, pfn, true);
2632 }
2633
2634 /*
2635 * Maps a range of physical memory into the requested pages. The old
2636 * mappings are removed. Any references to nonexistent pages result
2637 * in null mappings (currently treated as "copy-on-access").
2638 */
2639 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2640 unsigned long addr, unsigned long end,
2641 unsigned long pfn, pgprot_t prot)
2642 {
2643 pte_t *pte, *mapped_pte;
2644 spinlock_t *ptl;
2645 int err = 0;
2646
2647 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2648 if (!pte)
2649 return -ENOMEM;
2650 arch_enter_lazy_mmu_mode();
2651 do {
2652 BUG_ON(!pte_none(ptep_get(pte)));
2653 if (!pfn_modify_allowed(pfn, prot)) {
2654 err = -EACCES;
2655 break;
2656 }
2657 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2658 pfn++;
2659 } while (pte++, addr += PAGE_SIZE, addr != end);
2660 arch_leave_lazy_mmu_mode();
2661 pte_unmap_unlock(mapped_pte, ptl);
2662 return err;
2663 }
2664
2665 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2666 unsigned long addr, unsigned long end,
2667 unsigned long pfn, pgprot_t prot)
2668 {
2669 pmd_t *pmd;
2670 unsigned long next;
2671 int err;
2672
2673 pfn -= addr >> PAGE_SHIFT;
2674 pmd = pmd_alloc(mm, pud, addr);
2675 if (!pmd)
2676 return -ENOMEM;
2677 VM_BUG_ON(pmd_trans_huge(*pmd));
2678 do {
2679 next = pmd_addr_end(addr, end);
2680 err = remap_pte_range(mm, pmd, addr, next,
2681 pfn + (addr >> PAGE_SHIFT), prot);
2682 if (err)
2683 return err;
2684 } while (pmd++, addr = next, addr != end);
2685 return 0;
2686 }
2687
2688 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2689 unsigned long addr, unsigned long end,
2690 unsigned long pfn, pgprot_t prot)
2691 {
2692 pud_t *pud;
2693 unsigned long next;
2694 int err;
2695
2696 pfn -= addr >> PAGE_SHIFT;
2697 pud = pud_alloc(mm, p4d, addr);
2698 if (!pud)
2699 return -ENOMEM;
2700 do {
2701 next = pud_addr_end(addr, end);
2702 err = remap_pmd_range(mm, pud, addr, next,
2703 pfn + (addr >> PAGE_SHIFT), prot);
2704 if (err)
2705 return err;
2706 } while (pud++, addr = next, addr != end);
2707 return 0;
2708 }
2709
2710 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2711 unsigned long addr, unsigned long end,
2712 unsigned long pfn, pgprot_t prot)
2713 {
2714 p4d_t *p4d;
2715 unsigned long next;
2716 int err;
2717
2718 pfn -= addr >> PAGE_SHIFT;
2719 p4d = p4d_alloc(mm, pgd, addr);
2720 if (!p4d)
2721 return -ENOMEM;
2722 do {
2723 next = p4d_addr_end(addr, end);
2724 err = remap_pud_range(mm, p4d, addr, next,
2725 pfn + (addr >> PAGE_SHIFT), prot);
2726 if (err)
2727 return err;
2728 } while (p4d++, addr = next, addr != end);
2729 return 0;
2730 }
2731
2732 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
2733 unsigned long pfn, unsigned long size, pgprot_t prot)
2734 {
2735 pgd_t *pgd;
2736 unsigned long next;
2737 unsigned long end = addr + PAGE_ALIGN(size);
2738 struct mm_struct *mm = vma->vm_mm;
2739 int err;
2740
2741 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2742 return -EINVAL;
2743
2744 /*
2745 * Physically remapped pages are special. Tell the
2746 * rest of the world about it:
2747 * VM_IO tells people not to look at these pages
2748 * (accesses can have side effects).
2749 * VM_PFNMAP tells the core MM that the base pages are just
2750 * raw PFN mappings, and do not have a "struct page" associated
2751 * with them.
2752 * VM_DONTEXPAND
2753 * Disable vma merging and expanding with mremap().
2754 * VM_DONTDUMP
2755 * Omit vma from core dump, even when VM_IO turned off.
2756 *
2757 * There's a horrible special case to handle copy-on-write
2758 * behaviour that some programs depend on. We mark the "original"
2759 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2760 * See vm_normal_page() for details.
2761 */
2762 if (is_cow_mapping(vma->vm_flags)) {
2763 if (addr != vma->vm_start || end != vma->vm_end)
2764 return -EINVAL;
2765 vma->vm_pgoff = pfn;
2766 }
2767
2768 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
2769
2770 BUG_ON(addr >= end);
2771 pfn -= addr >> PAGE_SHIFT;
2772 pgd = pgd_offset(mm, addr);
2773 flush_cache_range(vma, addr, end);
2774 do {
2775 next = pgd_addr_end(addr, end);
2776 err = remap_p4d_range(mm, pgd, addr, next,
2777 pfn + (addr >> PAGE_SHIFT), prot);
2778 if (err)
2779 return err;
2780 } while (pgd++, addr = next, addr != end);
2781
2782 return 0;
2783 }
2784
2785 /*
2786 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
2787 * must have pre-validated the caching bits of the pgprot_t.
2788 */
2789 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2790 unsigned long pfn, unsigned long size, pgprot_t prot)
2791 {
2792 int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
2793
2794 if (!error)
2795 return 0;
2796
2797 /*
2798 * A partial pfn range mapping is dangerous: it does not
2799 * maintain page reference counts, and callers may free
2800 * pages due to the error. So zap it early.
2801 */
2802 zap_page_range_single(vma, addr, size, NULL);
2803 return error;
2804 }
2805
2806 /**
2807 * remap_pfn_range - remap kernel memory to userspace
2808 * @vma: user vma to map to
2809 * @addr: target page aligned user address to start at
2810 * @pfn: page frame number of kernel physical memory address
2811 * @size: size of mapping area
2812 * @prot: page protection flags for this mapping
2813 *
2814 * Note: this is only safe if the mm semaphore is held when called.
2815 *
2816 * Return: %0 on success, negative error code otherwise.
2817 */
2818 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2819 unsigned long pfn, unsigned long size, pgprot_t prot)
2820 {
2821 int err;
2822
2823 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2824 if (err)
2825 return -EINVAL;
2826
2827 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2828 if (err)
2829 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
2830 return err;
2831 }
2832 EXPORT_SYMBOL(remap_pfn_range);
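
/*
 * Illustrative sketch: the classic ->mmap() use, mapping a hypothetical
 * device register window starting at physical address dev->bar_start in
 * a single call.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       dev->bar_start >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */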
2833
2834 /**
2835 * vm_iomap_memory - remap memory to userspace
2836 * @vma: user vma to map to
2837 * @start: start of the physical memory to be mapped
2838 * @len: size of area
2839 *
2840 * This is a simplified io_remap_pfn_range() for common driver use. The
2841 * driver just needs to give us the physical memory range to be mapped,
2842 * we'll figure out the rest from the vma information.
2843 *
2844 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2845 * write-combining or similar behaviour.
2846 *
2847 * Return: %0 on success, negative error code otherwise.
2848 */
2849 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2850 {
2851 unsigned long vm_len, pfn, pages;
2852
2853 /* Check that the physical memory area passed in looks valid */
2854 if (start + len < start)
2855 return -EINVAL;
2856 /*
2857 * You *really* shouldn't map things that aren't page-aligned,
2858 * but we've historically allowed it because IO memory might
2859 * just have smaller alignment.
2860 */
2861 len += start & ~PAGE_MASK;
2862 pfn = start >> PAGE_SHIFT;
2863 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2864 if (pfn + pages < pfn)
2865 return -EINVAL;
2866
2867 /* We start the mapping 'vm_pgoff' pages into the area */
2868 if (vma->vm_pgoff > pages)
2869 return -EINVAL;
2870 pfn += vma->vm_pgoff;
2871 pages -= vma->vm_pgoff;
2872
2873 /* Can we fit all of the mapping? */
2874 vm_len = vma->vm_end - vma->vm_start;
2875 if (vm_len >> PAGE_SHIFT > pages)
2876 return -EINVAL;
2877
2878 /* Ok, let it rip */
2879 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2880 }
2881 EXPORT_SYMBOL(vm_iomap_memory);
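
/*
 * Worked example of the alignment handling above: with 4K pages,
 * start = 0x10000200 and len = 0x300, the sub-page offset (0x200) is
 * added to len, giving 0x500; pfn becomes 0x10000 and pages = 1, so a
 * one-page vma with vm_pgoff == 0 maps the single page containing the
 * unaligned IO window.
 */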
2882
2883 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2884 unsigned long addr, unsigned long end,
2885 pte_fn_t fn, void *data, bool create,
2886 pgtbl_mod_mask *mask)
2887 {
2888 pte_t *pte, *mapped_pte;
2889 int err = 0;
2890 spinlock_t *ptl;
2891
2892 if (create) {
2893 mapped_pte = pte = (mm == &init_mm) ?
2894 pte_alloc_kernel_track(pmd, addr, mask) :
2895 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2896 if (!pte)
2897 return -ENOMEM;
2898 } else {
2899 mapped_pte = pte = (mm == &init_mm) ?
2900 pte_offset_kernel(pmd, addr) :
2901 pte_offset_map_lock(mm, pmd, addr, &ptl);
2902 if (!pte)
2903 return -EINVAL;
2904 }
2905
2906 arch_enter_lazy_mmu_mode();
2907
2908 if (fn) {
2909 do {
2910 if (create || !pte_none(ptep_get(pte))) {
2911 err = fn(pte++, addr, data);
2912 if (err)
2913 break;
2914 }
2915 } while (addr += PAGE_SIZE, addr != end);
2916 }
2917 *mask |= PGTBL_PTE_MODIFIED;
2918
2919 arch_leave_lazy_mmu_mode();
2920
2921 if (mm != &init_mm)
2922 pte_unmap_unlock(mapped_pte, ptl);
2923 return err;
2924 }
2925
2926 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2927 unsigned long addr, unsigned long end,
2928 pte_fn_t fn, void *data, bool create,
2929 pgtbl_mod_mask *mask)
2930 {
2931 pmd_t *pmd;
2932 unsigned long next;
2933 int err = 0;
2934
2935 BUG_ON(pud_leaf(*pud));
2936
2937 if (create) {
2938 pmd = pmd_alloc_track(mm, pud, addr, mask);
2939 if (!pmd)
2940 return -ENOMEM;
2941 } else {
2942 pmd = pmd_offset(pud, addr);
2943 }
2944 do {
2945 next = pmd_addr_end(addr, end);
2946 if (pmd_none(*pmd) && !create)
2947 continue;
2948 if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2949 return -EINVAL;
2950 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2951 if (!create)
2952 continue;
2953 pmd_clear_bad(pmd);
2954 }
2955 err = apply_to_pte_range(mm, pmd, addr, next,
2956 fn, data, create, mask);
2957 if (err)
2958 break;
2959 } while (pmd++, addr = next, addr != end);
2960
2961 return err;
2962 }
2963
2964 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2965 unsigned long addr, unsigned long end,
2966 pte_fn_t fn, void *data, bool create,
2967 pgtbl_mod_mask *mask)
2968 {
2969 pud_t *pud;
2970 unsigned long next;
2971 int err = 0;
2972
2973 if (create) {
2974 pud = pud_alloc_track(mm, p4d, addr, mask);
2975 if (!pud)
2976 return -ENOMEM;
2977 } else {
2978 pud = pud_offset(p4d, addr);
2979 }
2980 do {
2981 next = pud_addr_end(addr, end);
2982 if (pud_none(*pud) && !create)
2983 continue;
2984 if (WARN_ON_ONCE(pud_leaf(*pud)))
2985 return -EINVAL;
2986 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2987 if (!create)
2988 continue;
2989 pud_clear_bad(pud);
2990 }
2991 err = apply_to_pmd_range(mm, pud, addr, next,
2992 fn, data, create, mask);
2993 if (err)
2994 break;
2995 } while (pud++, addr = next, addr != end);
2996
2997 return err;
2998 }
2999
3000 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
3001 unsigned long addr, unsigned long end,
3002 pte_fn_t fn, void *data, bool create,
3003 pgtbl_mod_mask *mask)
3004 {
3005 p4d_t *p4d;
3006 unsigned long next;
3007 int err = 0;
3008
3009 if (create) {
3010 p4d = p4d_alloc_track(mm, pgd, addr, mask);
3011 if (!p4d)
3012 return -ENOMEM;
3013 } else {
3014 p4d = p4d_offset(pgd, addr);
3015 }
3016 do {
3017 next = p4d_addr_end(addr, end);
3018 if (p4d_none(*p4d) && !create)
3019 continue;
3020 if (WARN_ON_ONCE(p4d_leaf(*p4d)))
3021 return -EINVAL;
3022 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
3023 if (!create)
3024 continue;
3025 p4d_clear_bad(p4d);
3026 }
3027 err = apply_to_pud_range(mm, p4d, addr, next,
3028 fn, data, create, mask);
3029 if (err)
3030 break;
3031 } while (p4d++, addr = next, addr != end);
3032
3033 return err;
3034 }
3035
3036 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3037 unsigned long size, pte_fn_t fn,
3038 void *data, bool create)
3039 {
3040 pgd_t *pgd;
3041 unsigned long start = addr, next;
3042 unsigned long end = addr + size;
3043 pgtbl_mod_mask mask = 0;
3044 int err = 0;
3045
3046 if (WARN_ON(addr >= end))
3047 return -EINVAL;
3048
3049 pgd = pgd_offset(mm, addr);
3050 do {
3051 next = pgd_addr_end(addr, end);
3052 if (pgd_none(*pgd) && !create)
3053 continue;
3054 if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
3055 err = -EINVAL;
3056 break;
3057 }
3058 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
3059 if (!create)
3060 continue;
3061 pgd_clear_bad(pgd);
3062 }
3063 err = apply_to_p4d_range(mm, pgd, addr, next,
3064 fn, data, create, &mask);
3065 if (err)
3066 break;
3067 } while (pgd++, addr = next, addr != end);
3068
3069 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
3070 arch_sync_kernel_mappings(start, start + size);
3071
3072 return err;
3073 }
3074
3075 /*
3076 * Scan a region of virtual memory, filling in page tables as necessary
3077 * and calling a provided function on each leaf page table.
3078 */
3079 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3080 unsigned long size, pte_fn_t fn, void *data)
3081 {
3082 return __apply_to_page_range(mm, addr, size, fn, data, true);
3083 }
3084 EXPORT_SYMBOL_GPL(apply_to_page_range);
3085
3086 /*
3087 * Scan a region of virtual memory, calling a provided function on
3088 * each leaf page table where it exists.
3089 *
3090 * Unlike apply_to_page_range, this does _not_ fill in page tables
3091 * where they are absent.
3092 */
3093 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
3094 unsigned long size, pte_fn_t fn, void *data)
3095 {
3096 return __apply_to_page_range(mm, addr, size, fn, data, false);
3097 }
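
/*
 * Illustrative sketch: a pte_fn_t callback that merely counts populated
 * leaf entries in a kernel range, with the counter passed through @data.
 * The range (`addr`, `size`) is assumed to come from the caller.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		if (!pte_none(ptep_get(pte)))
 *			(*(unsigned long *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	apply_to_existing_page_range(&init_mm, addr, size,
 *				     count_pte, &count);
 */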
3098
3099 /*
3100 * handle_pte_fault chooses page fault handler according to an entry which was
3101 * read non-atomically. Before making any commitment, on those architectures
3102 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3103 * parts, do_swap_page must check under lock before unmapping the pte and
3104 * proceeding (but do_wp_page is only called after already making such a check;
3105 * and do_anonymous_page can safely check later on).
3106 */
3107 static inline int pte_unmap_same(struct vm_fault *vmf)
3108 {
3109 int same = 1;
3110 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
3111 if (sizeof(pte_t) > sizeof(unsigned long)) {
3112 spin_lock(vmf->ptl);
3113 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
3114 spin_unlock(vmf->ptl);
3115 }
3116 #endif
3117 pte_unmap(vmf->pte);
3118 vmf->pte = NULL;
3119 return same;
3120 }
3121
3122 /*
3123 * Return:
3124 * 0: copy succeeded
3125 * -EHWPOISON: copy failed due to hwpoison in source page
3126 * -EAGAIN: copy failed (some other reason)
3127 */
3128 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
3129 struct vm_fault *vmf)
3130 {
3131 int ret;
3132 void *kaddr;
3133 void __user *uaddr;
3134 struct vm_area_struct *vma = vmf->vma;
3135 struct mm_struct *mm = vma->vm_mm;
3136 unsigned long addr = vmf->address;
3137
3138 if (likely(src)) {
3139 if (copy_mc_user_highpage(dst, src, addr, vma))
3140 return -EHWPOISON;
3141 return 0;
3142 }
3143
3144 /*
3145 * If the source page was a PFN mapping, we don't have
3146 * a "struct page" for it. We do a best-effort copy by
3147 * just copying from the original user address. If that
3148 * fails, we just zero-fill it. Live with it.
3149 */
3150 kaddr = kmap_local_page(dst);
3151 pagefault_disable();
3152 uaddr = (void __user *)(addr & PAGE_MASK);
3153
3154 /*
3155 * On architectures with software "accessed" bits, we would
3156 * take a double page fault, so mark it accessed here.
3157 */
3158 vmf->pte = NULL;
3159 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
3160 pte_t entry;
3161
3162 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3163 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3164 /*
3165 * Another thread has already handled the fault;
3166 * just update the local TLB.
3167 */
3168 if (vmf->pte)
3169 update_mmu_tlb(vma, addr, vmf->pte);
3170 ret = -EAGAIN;
3171 goto pte_unlock;
3172 }
3173
3174 entry = pte_mkyoung(vmf->orig_pte);
3175 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3176 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3177 }
3178
3179 /*
3180 * This really shouldn't fail, because the page is there
3181 * in the page tables. But it might just be unreadable,
3182 * in which case we just give up and fill the result with
3183 * zeroes.
3184 */
3185 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3186 if (vmf->pte)
3187 goto warn;
3188
3189 /* Re-validate under PTL if the page is still mapped */
3190 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3191 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3192 /* The PTE changed under us, update local tlb */
3193 if (vmf->pte)
3194 update_mmu_tlb(vma, addr, vmf->pte);
3195 ret = -EAGAIN;
3196 goto pte_unlock;
3197 }
3198
3199 /*
3200 * The same page may have been mapped back since the last copy
3201 * attempt. Try to copy again under the PTL.
3202 */
3203 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3204 /*
3205 * Warn in case there is some obscure
3206 * use-case.
3207 */
3208 warn:
3209 WARN_ON_ONCE(1);
3210 clear_page(kaddr);
3211 }
3212 }
3213
3214 ret = 0;
3215
3216 pte_unlock:
3217 if (vmf->pte)
3218 pte_unmap_unlock(vmf->pte, vmf->ptl);
3219 pagefault_enable();
3220 kunmap_local(kaddr);
3221 flush_dcache_page(dst);
3222
3223 return ret;
3224 }
3225
3226 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3227 {
3228 struct file *vm_file = vma->vm_file;
3229
3230 if (vm_file)
3231 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3232
3233 /*
3234 * Special mappings (e.g. VDSO) do not have any file so fake
3235 * a default GFP_KERNEL for them.
3236 */
3237 return GFP_KERNEL;
3238 }
3239
3240 /*
3241 * Notify the address space that the page is about to become writable so that
3242 * it can prohibit this or wait for the page to get into an appropriate state.
3243 *
3244 * We do this without the lock held, so that it can sleep if it needs to.
3245 */
3246 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3247 {
3248 vm_fault_t ret;
3249 unsigned int old_flags = vmf->flags;
3250
3251 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3252
3253 if (vmf->vma->vm_file &&
3254 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3255 return VM_FAULT_SIGBUS;
3256
3257 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3258 /* Restore original flags so that caller is not surprised */
3259 vmf->flags = old_flags;
3260 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3261 return ret;
3262 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3263 folio_lock(folio);
3264 if (!folio->mapping) {
3265 folio_unlock(folio);
3266 return 0; /* retry */
3267 }
3268 ret |= VM_FAULT_LOCKED;
3269 } else
3270 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3271 return ret;
3272 }
3273
3274 /*
3275 * Handle dirtying of a page in a shared file mapping on a write fault.
3276 *
3277 * The function expects the page to be locked and unlocks it.
3278 */
3279 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3280 {
3281 struct vm_area_struct *vma = vmf->vma;
3282 struct address_space *mapping;
3283 struct folio *folio = page_folio(vmf->page);
3284 bool dirtied;
3285 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3286
3287 dirtied = folio_mark_dirty(folio);
3288 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3289 /*
3290 * Take a local copy of the address_space - folio.mapping may be zeroed
3291 * by truncate after folio_unlock(). The address_space itself remains
3292 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s
3293 * release semantics to prevent the compiler from undoing this copying.
3294 */
3295 mapping = folio_raw_mapping(folio);
3296 folio_unlock(folio);
3297
3298 if (!page_mkwrite)
3299 file_update_time(vma->vm_file);
3300
3301 /*
3302 * Throttle page dirtying rate down to writeback speed.
3303 *
3304 * mapping may be NULL here because some device drivers do not
3305 * set page.mapping but still dirty their pages
3306 *
3307 * Drop the mmap_lock before waiting on IO, if we can. The file
3308 * is pinning the mapping, as per above.
3309 */
3310 if ((dirtied || page_mkwrite) && mapping) {
3311 struct file *fpin;
3312
3313 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3314 balance_dirty_pages_ratelimited(mapping);
3315 if (fpin) {
3316 fput(fpin);
3317 return VM_FAULT_COMPLETED;
3318 }
3319 }
3320
3321 return 0;
3322 }
3323
3324 /*
3325 * Handle write page faults for pages that can be reused in the current vma
3326 *
3327 * This can happen either due to the mapping having the VM_SHARED flag,
3328 * or due to us holding the last remaining reference to the page. In either
3329 * case, all we need to do here is to mark the page as writable and update
3330 * any related book-keeping.
3331 */
3332 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3333 __releases(vmf->ptl)
3334 {
3335 struct vm_area_struct *vma = vmf->vma;
3336 pte_t entry;
3337
3338 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3339 VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
3340
3341 if (folio) {
3342 VM_BUG_ON(folio_test_anon(folio) &&
3343 !PageAnonExclusive(vmf->page));
3344 /*
3345 * Clear the folio's cpupid information as the existing
3346 * information potentially belongs to a now completely
3347 * unrelated process.
3348 */
3349 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3350 }
3351
3352 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3353 entry = pte_mkyoung(vmf->orig_pte);
3354 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3355 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3356 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3357 pte_unmap_unlock(vmf->pte, vmf->ptl);
3358 count_vm_event(PGREUSE);
3359 }
3360
3361 /*
3362 * We could add a bitflag somewhere, but for now, we know that all
3363 * vm_ops that have a ->map_pages have been audited and don't need
3364 * the mmap_lock to be held.
3365 */
3366 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3367 {
3368 struct vm_area_struct *vma = vmf->vma;
3369
3370 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3371 return 0;
3372 vma_end_read(vma);
3373 return VM_FAULT_RETRY;
3374 }
3375
3376 /**
3377 * __vmf_anon_prepare - Prepare to handle an anonymous fault.
3378 * @vmf: The vm_fault descriptor passed from the fault handler.
3379 *
3380 * When preparing to insert an anonymous page into a VMA from a
3381 * fault handler, call this function rather than anon_vma_prepare().
3382 * If this vma does not already have an associated anon_vma and we are
3383 * only protected by the per-VMA lock, the caller must retry with the
3384 * mmap_lock held. __anon_vma_prepare() will look at adjacent VMAs to
3385 * determine if this VMA can share its anon_vma, and that's not safe to
3386 * do with only the per-VMA lock held for this VMA.
3387 *
3388 * Return: 0 if fault handling can proceed. Any other value should be
3389 * returned to the caller.
3390 */
3391 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
3392 {
3393 struct vm_area_struct *vma = vmf->vma;
3394 vm_fault_t ret = 0;
3395
3396 if (likely(vma->anon_vma))
3397 return 0;
3398 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3399 if (!mmap_read_trylock(vma->vm_mm))
3400 return VM_FAULT_RETRY;
3401 }
3402 if (__anon_vma_prepare(vma))
3403 ret = VM_FAULT_OOM;
3404 if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3405 mmap_read_unlock(vma->vm_mm);
3406 return ret;
3407 }
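/*
 * [Editorial note.] The call sites below go through a vmf_anon_prepare()
 * wrapper (defined in internal.h) that additionally drops the per-VMA
 * lock when a retry is requested; roughly:
 *
 *	static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
 *	{
 *		vm_fault_t ret = __vmf_anon_prepare(vmf);
 *
 *		if (unlikely(ret & VM_FAULT_RETRY))
 *			vma_end_read(vmf->vma);
 *		return ret;
 *	}
 */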
3408
3409 /*
3410 * Handle the case of a page which we actually need to copy to a new page,
3411 * either due to COW or unsharing.
3412 *
3413 * Called with mmap_lock locked and the old page referenced, but
3414 * without the ptl held.
3415 *
3416 * High level logic flow:
3417 *
3418 * - Allocate a page, copy the content of the old page to the new one.
3419 * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
3420 * - Take the PTL. If the pte changed, bail out and release the allocated page
3421 * - If the pte is still the way we remember it, update the page table and all
3422 * relevant references. This includes dropping the reference the page-table
3423 * held to the old page, as well as updating the rmap.
3424 * - In any case, unlock the PTL and drop the reference we took to the old page.
3425 */
3426 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3427 {
3428 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3429 struct vm_area_struct *vma = vmf->vma;
3430 struct mm_struct *mm = vma->vm_mm;
3431 struct folio *old_folio = NULL;
3432 struct folio *new_folio = NULL;
3433 pte_t entry;
3434 int page_copied = 0;
3435 struct mmu_notifier_range range;
3436 vm_fault_t ret;
3437 bool pfn_is_zero;
3438
3439 delayacct_wpcopy_start();
3440
3441 if (vmf->page)
3442 old_folio = page_folio(vmf->page);
3443 ret = vmf_anon_prepare(vmf);
3444 if (unlikely(ret))
3445 goto out;
3446
3447 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3448 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3449 if (!new_folio)
3450 goto oom;
3451
3452 if (!pfn_is_zero) {
3453 int err;
3454
3455 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3456 if (err) {
3457 /*
3458 * COW failed; if the fault was resolved by someone
3459 * else in the meantime, that's fine. If not, userspace
3460 * will re-fault on the same address and we will handle
3461 * the fault on the second attempt.
3462 * The -EHWPOISON case will not be retried.
3463 */
3464 folio_put(new_folio);
3465 if (old_folio)
3466 folio_put(old_folio);
3467
3468 delayacct_wpcopy_end();
3469 return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3470 }
3471 kmsan_copy_page_meta(&new_folio->page, vmf->page);
3472 }
3473
3474 __folio_mark_uptodate(new_folio);
3475
3476 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3477 vmf->address & PAGE_MASK,
3478 (vmf->address & PAGE_MASK) + PAGE_SIZE);
3479 mmu_notifier_invalidate_range_start(&range);
3480
3481 /*
3482 * Re-check the pte - we dropped the lock
3483 */
3484 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3485 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3486 if (old_folio) {
3487 if (!folio_test_anon(old_folio)) {
3488 dec_mm_counter(mm, mm_counter_file(old_folio));
3489 inc_mm_counter(mm, MM_ANONPAGES);
3490 }
3491 } else {
3492 ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3493 inc_mm_counter(mm, MM_ANONPAGES);
3494 }
3495 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3496 entry = mk_pte(&new_folio->page, vma->vm_page_prot);
3497 entry = pte_sw_mkyoung(entry);
3498 if (unlikely(unshare)) {
3499 if (pte_soft_dirty(vmf->orig_pte))
3500 entry = pte_mksoft_dirty(entry);
3501 if (pte_uffd_wp(vmf->orig_pte))
3502 entry = pte_mkuffd_wp(entry);
3503 } else {
3504 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3505 }
3506
3507 /*
3508 * Clear the pte entry and flush it first, before updating the
3509 * pte with the new entry, to keep TLBs on different CPUs in
3510 * sync. This code used to set the new PTE then flush TLBs, but
3511 * that left a window where the new PTE could be loaded into
3512 * some TLBs while the old PTE remains in others.
3513 */
3514 ptep_clear_flush(vma, vmf->address, vmf->pte);
3515 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
3516 folio_add_lru_vma(new_folio, vma);
3517 BUG_ON(unshare && pte_write(entry));
3518 set_pte_at(mm, vmf->address, vmf->pte, entry);
3519 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3520 if (old_folio) {
3521 /*
3522 * Only after switching the pte to the new page may
3523 * we remove the mapcount here. Otherwise another
3524 * process may come and find the rmap count decremented
3525 * before the pte is switched to the new page, and
3526 * "reuse" the old page writing into it while our pte
3527 * here still points into it and can be read by other
3528 * threads.
3529 *
3530 * The critical issue is to order this
3531 * folio_remove_rmap_pte() with the ptep_clear_flush
3532 * above. Those stores are ordered by (if nothing else,)
3533 * the barrier present in the atomic_add_negative
3534 * in folio_remove_rmap_pte();
3535 *
3536 * Then the TLB flush in ptep_clear_flush ensures that
3537 * no process can access the old page before the
3538 * decremented mapcount is visible. And the old page
3539 * cannot be reused until after the decremented
3540 * mapcount is visible. So transitively, TLBs to
3541 * old page will be flushed before it can be reused.
3542 */
3543 folio_remove_rmap_pte(old_folio, vmf->page, vma);
3544 }
3545
3546 /* Free the old page: hand it to the common exit path below. */
3547 new_folio = old_folio;
3548 page_copied = 1;
3549 pte_unmap_unlock(vmf->pte, vmf->ptl);
3550 } else if (vmf->pte) {
3551 update_mmu_tlb(vma, vmf->address, vmf->pte);
3552 pte_unmap_unlock(vmf->pte, vmf->ptl);
3553 }
3554
3555 mmu_notifier_invalidate_range_end(&range);
3556
3557 if (new_folio)
3558 folio_put(new_folio);
3559 if (old_folio) {
3560 if (page_copied)
3561 free_swap_cache(old_folio);
3562 folio_put(old_folio);
3563 }
3564
3565 delayacct_wpcopy_end();
3566 return 0;
3567 oom:
3568 ret = VM_FAULT_OOM;
3569 out:
3570 if (old_folio)
3571 folio_put(old_folio);
3572
3573 delayacct_wpcopy_end();
3574 return ret;
3575 }
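/*
 * [Editorial illustration, not kernel code.] How this COW path is
 * triggered from userspace: after fork(), parent and child share the
 * anonymous page read-only, and the child's write below is serviced by
 * do_wp_page() -> wp_page_copy() (or by wp_page_reuse() if the child
 * turns out to be the sole remaining owner).
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int cow_demo(void)
 *	{
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		p[0] = 1;		// first touch: do_anonymous_page()
 *		if (fork() == 0) {
 *			p[0] = 2;	// write fault: COW copy for the child
 *			_exit(0);
 *		}
 *		return p[0];		// parent still sees 1
 *	}
 */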
3576
3577 /**
3578 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3579 * writeable once the page is prepared
3580 *
3581 * @vmf: structure describing the fault
3582 * @folio: the folio of vmf->page
3583 *
3584 * This function handles all that is needed to finish a write page fault in a
3585 * shared mapping due to PTE being read-only once the mapped page is prepared.
3586 * It handles locking of PTE and modifying it.
3587 *
3588 * The function expects the page to be locked or other protection against
3589 * concurrent faults / writeback (such as DAX radix tree locks).
3590 *
3591 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3592 * we acquired PTE lock.
3593 */
3594 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
3595 {
3596 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3597 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3598 &vmf->ptl);
3599 if (!vmf->pte)
3600 return VM_FAULT_NOPAGE;
3601 /*
3602 * We might have raced with another page fault while we released the
3603 * pte_offset_map_lock.
3604 */
3605 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3606 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3607 pte_unmap_unlock(vmf->pte, vmf->ptl);
3608 return VM_FAULT_NOPAGE;
3609 }
3610 wp_page_reuse(vmf, folio);
3611 return 0;
3612 }
3613
3614 /*
3615 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3616 * mapping
3617 */
3618 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3619 {
3620 struct vm_area_struct *vma = vmf->vma;
3621
3622 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3623 vm_fault_t ret;
3624
3625 pte_unmap_unlock(vmf->pte, vmf->ptl);
3626 ret = vmf_can_call_fault(vmf);
3627 if (ret)
3628 return ret;
3629
3630 vmf->flags |= FAULT_FLAG_MKWRITE;
3631 ret = vma->vm_ops->pfn_mkwrite(vmf);
3632 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3633 return ret;
3634 return finish_mkwrite_fault(vmf, NULL);
3635 }
3636 wp_page_reuse(vmf, NULL);
3637 return 0;
3638 }
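/*
 * [Editorial sketch, not part of the original source.] A minimal
 * ->pfn_mkwrite handler for a VM_PFNMAP driver: it only performs
 * driver-side bookkeeping and returns 0, after which the caller above
 * re-takes the PTE lock in finish_mkwrite_fault() and marks the PTE
 * writable via wp_page_reuse(). example_mark_dirty() is a hypothetical
 * driver helper, as is the handler name itself.
 */
static vm_fault_t example_pfn_mkwrite(struct vm_fault *vmf)
{
	/* Hypothetical: record that this pfn is about to become writable. */
	example_mark_dirty(vmf->vma->vm_private_data, vmf->pgoff);
	return 0;
}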
3639
3640 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3641 __releases(vmf->ptl)
3642 {
3643 struct vm_area_struct *vma = vmf->vma;
3644 vm_fault_t ret = 0;
3645
3646 folio_get(folio);
3647
3648 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3649 vm_fault_t tmp;
3650
3651 pte_unmap_unlock(vmf->pte, vmf->ptl);
3652 tmp = vmf_can_call_fault(vmf);
3653 if (tmp) {
3654 folio_put(folio);
3655 return tmp;
3656 }
3657
3658 tmp = do_page_mkwrite(vmf, folio);
3659 if (unlikely(!tmp || (tmp &
3660 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3661 folio_put(folio);
3662 return tmp;
3663 }
3664 tmp = finish_mkwrite_fault(vmf, folio);
3665 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3666 folio_unlock(folio);
3667 folio_put(folio);
3668 return tmp;
3669 }
3670 } else {
3671 wp_page_reuse(vmf, folio);
3672 folio_lock(folio);
3673 }
3674 ret |= fault_dirty_shared_page(vmf);
3675 folio_put(folio);
3676
3677 return ret;
3678 }
3679
3680 static bool wp_can_reuse_anon_folio(struct folio *folio,
3681 struct vm_area_struct *vma)
3682 {
3683 /*
3684 * We could currently only reuse a subpage of a large folio if no
3685 * other subpages of the large folios are still mapped. However,
3686 * let's just consistently not reuse subpages even if we could
3687 * reuse in that scenario, and give back a large folio a bit
3688 * sooner.
3689 */
3690 if (folio_test_large(folio))
3691 return false;
3692
3693 /*
3694 * We have to verify under folio lock: these early checks are
3695 * just an optimization to avoid locking the folio and freeing
3696 * the swapcache if there is little hope that we can reuse.
3697 *
3698 * KSM doesn't necessarily raise the folio refcount.
3699 */
3700 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3701 return false;
3702 if (!folio_test_lru(folio))
3703 /*
3704 * We cannot easily detect+handle references from
3705 * remote LRU caches or references to LRU folios.
3706 */
3707 lru_add_drain();
3708 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3709 return false;
3710 if (!folio_trylock(folio))
3711 return false;
3712 if (folio_test_swapcache(folio))
3713 folio_free_swap(folio);
3714 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3715 folio_unlock(folio);
3716 return false;
3717 }
3718 /*
3719 * Ok, we've got the only folio reference from our mapping
3720 * and the folio is locked, it's dark out, and we're wearing
3721 * sunglasses. Hit it.
3722 */
3723 folio_move_anon_rmap(folio, vma);
3724 folio_unlock(folio);
3725 return true;
3726 }
3727
3728 /*
3729 * This routine handles present pages, when
3730 * * users try to write to a shared page (FAULT_FLAG_WRITE)
3731 * * GUP wants to take a R/O pin on a possibly shared anonymous page
3732 * (FAULT_FLAG_UNSHARE)
3733 *
3734 * It is done by copying the page to a new address and decrementing the
3735 * shared-page counter for the old page.
3736 *
3737 * Note that this routine assumes that the protection checks have been
3738 * done by the caller (the low-level page fault routine in most cases).
3739 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3740 * done any necessary COW.
3741 *
3742 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3743 * though the page will change only once the write actually happens. This
3744 * avoids a few races, and potentially makes it more efficient.
3745 *
3746 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3747 * but allow concurrent faults), with pte both mapped and locked.
3748 * We return with mmap_lock still held, but pte unmapped and unlocked.
3749 */
3750 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3751 __releases(vmf->ptl)
3752 {
3753 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3754 struct vm_area_struct *vma = vmf->vma;
3755 struct folio *folio = NULL;
3756 pte_t pte;
3757
3758 if (likely(!unshare)) {
3759 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3760 if (!userfaultfd_wp_async(vma)) {
3761 pte_unmap_unlock(vmf->pte, vmf->ptl);
3762 return handle_userfault(vmf, VM_UFFD_WP);
3763 }
3764
3765 /*
3766 * Nothing needed (cache flush, TLB invalidations,
3767 * etc.) because we're only removing the uffd-wp bit,
3768 * which is completely invisible to the user.
3769 */
3770 pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
3771
3772 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3773 /*
3774 * Update this to be prepared for following up CoW
3775 * handling
3776 */
3777 vmf->orig_pte = pte;
3778 }
3779
3780 /*
3781 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3782 * is flushed in this case before copying.
3783 */
3784 if (unlikely(userfaultfd_wp(vmf->vma) &&
3785 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3786 flush_tlb_page(vmf->vma, vmf->address);
3787 }
3788
3789 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3790
3791 if (vmf->page)
3792 folio = page_folio(vmf->page);
3793
3794 /*
3795 * Shared mapping: we are guaranteed to have VM_WRITE and
3796 * FAULT_FLAG_WRITE set at this point.
3797 */
3798 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3799 /*
3800 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3801 * VM_PFNMAP VMA.
3802 *
3803 * We should not cow pages in a shared writeable mapping.
3804 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3805 */
3806 if (!vmf->page)
3807 return wp_pfn_shared(vmf);
3808 return wp_page_shared(vmf, folio);
3809 }
3810
3811 /*
3812 * Private mapping: create an exclusive anonymous page copy if reuse
3813 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
3814 *
3815 * If we encounter a page that is marked exclusive, we must reuse
3816 * the page without further checks.
3817 */
3818 if (folio && folio_test_anon(folio) &&
3819 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
3820 if (!PageAnonExclusive(vmf->page))
3821 SetPageAnonExclusive(vmf->page);
3822 if (unlikely(unshare)) {
3823 pte_unmap_unlock(vmf->pte, vmf->ptl);
3824 return 0;
3825 }
3826 wp_page_reuse(vmf, folio);
3827 return 0;
3828 }
3829 /*
3830 * Ok, we need to copy. Oh, well..
3831 */
3832 if (folio)
3833 folio_get(folio);
3834
3835 pte_unmap_unlock(vmf->pte, vmf->ptl);
3836 #ifdef CONFIG_KSM
3837 if (folio && folio_test_ksm(folio))
3838 count_vm_event(COW_KSM);
3839 #endif
3840 return wp_page_copy(vmf);
3841 }
3842
3843 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3844 unsigned long start_addr, unsigned long end_addr,
3845 struct zap_details *details)
3846 {
3847 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3848 }
3849
3850 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3851 pgoff_t first_index,
3852 pgoff_t last_index,
3853 struct zap_details *details)
3854 {
3855 struct vm_area_struct *vma;
3856 pgoff_t vba, vea, zba, zea;
3857
3858 vma_interval_tree_foreach(vma, root, first_index, last_index) {
3859 vba = vma->vm_pgoff;
3860 vea = vba + vma_pages(vma) - 1;
3861 zba = max(first_index, vba);
3862 zea = min(last_index, vea);
3863
3864 unmap_mapping_range_vma(vma,
3865 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3866 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3867 details);
3868 }
3869 }
3870
3871 /**
3872 * unmap_mapping_folio() - Unmap single folio from processes.
3873 * @folio: The locked folio to be unmapped.
3874 *
3875 * Unmap this folio from any userspace process which still has it mmaped.
3876 * Typically, for efficiency, the range of nearby pages has already been
3877 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
3878 * truncation or invalidation holds the lock on a folio, it may find that
3879 * the folio has been remapped again; unmap_mapping_folio() is then used
3880 * to unmap it for good.
3881 */
3882 void unmap_mapping_folio(struct folio *folio)
3883 {
3884 struct address_space *mapping = folio->mapping;
3885 struct zap_details details = { };
3886 pgoff_t first_index;
3887 pgoff_t last_index;
3888
3889 VM_BUG_ON(!folio_test_locked(folio));
3890
3891 first_index = folio->index;
3892 last_index = folio_next_index(folio) - 1;
3893
3894 details.even_cows = false;
3895 details.single_folio = folio;
3896 details.zap_flags = ZAP_FLAG_DROP_MARKER;
3897
3898 i_mmap_lock_read(mapping);
3899 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3900 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3901 last_index, &details);
3902 i_mmap_unlock_read(mapping);
3903 }
3904
3905 /**
3906 * unmap_mapping_pages() - Unmap pages from processes.
3907 * @mapping: The address space containing pages to be unmapped.
3908 * @start: Index of first page to be unmapped.
3909 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3910 * @even_cows: Whether to unmap even private COWed pages.
3911 *
3912 * Unmap the pages in this address space from any userspace process which
3913 * has them mmaped. Generally, you want to remove COWed pages as well when
3914 * a file is being truncated, but not when invalidating pages from the page
3915 * cache.
3916 */
3917 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3918 pgoff_t nr, bool even_cows)
3919 {
3920 struct zap_details details = { };
3921 pgoff_t first_index = start;
3922 pgoff_t last_index = start + nr - 1;
3923
3924 details.even_cows = even_cows;
3925 if (last_index < first_index)
3926 last_index = ULONG_MAX;
3927
3928 i_mmap_lock_read(mapping);
3929 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3930 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3931 last_index, &details);
3932 i_mmap_unlock_read(mapping);
3933 }
3934 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
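/*
 * [Editorial example, not part of the original source.] Typical
 * invalidation-style usage: zap a single page index from every mapping
 * while leaving private COWed copies intact (even_cows == false). The
 * helper name is hypothetical.
 */
static void example_invalidate_index(struct address_space *mapping,
				     pgoff_t index)
{
	unmap_mapping_pages(mapping, index, 1, false);
}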
3935
3936 /**
3937 * unmap_mapping_range - unmap the portion of all mmaps in the specified
3938 * address_space corresponding to the specified byte range in the underlying
3939 * file.
3940 *
3941 * @mapping: the address space containing mmaps to be unmapped.
3942 * @holebegin: byte in first page to unmap, relative to the start of
3943 * the underlying file. This will be rounded down to a PAGE_SIZE
3944 * boundary. Note that this is different from truncate_pagecache(), which
3945 * must keep the partial page. In contrast, we must get rid of
3946 * partial pages.
3947 * @holelen: size of prospective hole in bytes. This will be rounded
3948 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3949 * end of the file.
3950 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3951 * but 0 when invalidating pagecache, don't throw away private data.
3952 */
3953 void unmap_mapping_range(struct address_space *mapping,
3954 loff_t const holebegin, loff_t const holelen, int even_cows)
3955 {
3956 pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3957 pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
3958
3959 /* Check for overflow. */
3960 if (sizeof(holelen) > sizeof(hlen)) {
3961 long long holeend =
3962 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3963 if (holeend & ~(long long)ULONG_MAX)
3964 hlen = ULONG_MAX - hba + 1;
3965 }
3966
3967 unmap_mapping_pages(mapping, hba, hlen, even_cows);
3968 }
3969 EXPORT_SYMBOL(unmap_mapping_range);
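/*
 * [Editorial example, not part of the original source.] The classic
 * truncation caller: unmap everything from the page-aligned new EOF to
 * the end of the file, private COWed pages included (even_cows == 1),
 * before the pagecache itself is truncated. Compare truncate_pagecache()
 * in mm/truncate.c; the helper name here is hypothetical.
 */
static void example_truncate_unmap(struct inode *inode, loff_t newsize)
{
	/* holelen == 0 means "to the end of the file". */
	unmap_mapping_range(inode->i_mapping,
			    round_up(newsize, PAGE_SIZE), 0, 1);
}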
3970
3971 /*
3972 * Restore a potential device exclusive pte to a working pte entry
3973 */
3974 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3975 {
3976 struct folio *folio = page_folio(vmf->page);
3977 struct vm_area_struct *vma = vmf->vma;
3978 struct mmu_notifier_range range;
3979 vm_fault_t ret;
3980
3981 /*
3982 * We need a reference to lock the folio because we don't hold
3983 * the PTL so a racing thread can remove the device-exclusive
3984 * entry and unmap it. If the folio is free the entry must
3985 * have been removed already. If it happens to have already
3986 * been re-allocated after being freed all we do is lock and
3987 * unlock it.
3988 */
3989 if (!folio_try_get(folio))
3990 return 0;
3991
3992 ret = folio_lock_or_retry(folio, vmf);
3993 if (ret) {
3994 folio_put(folio);
3995 return ret;
3996 }
3997 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3998 vma->vm_mm, vmf->address & PAGE_MASK,
3999 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
4000 mmu_notifier_invalidate_range_start(&range);
4001
4002 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4003 &vmf->ptl);
4004 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4005 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
4006
4007 if (vmf->pte)
4008 pte_unmap_unlock(vmf->pte, vmf->ptl);
4009 folio_unlock(folio);
4010 folio_put(folio);
4011
4012 mmu_notifier_invalidate_range_end(&range);
4013 return 0;
4014 }
4015
4016 static inline bool should_try_to_free_swap(struct folio *folio,
4017 struct vm_area_struct *vma,
4018 unsigned int fault_flags)
4019 {
4020 if (!folio_test_swapcache(folio))
4021 return false;
4022 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
4023 folio_test_mlocked(folio))
4024 return true;
4025 /*
4026 * If we want to map a page that's in the swapcache writable, we
4027 * have to detect via the refcount if we're really the exclusive
4028 * user. Try freeing the swapcache to get rid of the swapcache
4029 * reference only in case it's likely that we'll be the exclusive user.
4030 */
4031 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
4032 folio_ref_count(folio) == (1 + folio_nr_pages(folio));
4033 }
4034
4035 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
4036 {
4037 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4038 vmf->address, &vmf->ptl);
4039 if (!vmf->pte)
4040 return 0;
4041 /*
4042 * Be careful so that we will only recover a special uffd-wp pte into a
4043 * none pte. Otherwise it means the pte could have changed, so retry.
4044 *
4045 * This should also cover the case where e.g. the pte changed
4046 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
4047 * So is_pte_marker() check is not enough to safely drop the pte.
4048 */
4049 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
4050 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
4051 pte_unmap_unlock(vmf->pte, vmf->ptl);
4052 return 0;
4053 }
4054
4055 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
4056 {
4057 if (vma_is_anonymous(vmf->vma))
4058 return do_anonymous_page(vmf);
4059 else
4060 return do_fault(vmf);
4061 }
4062
4063 /*
4064 * This is actually a page-missing access, but with uffd-wp special pte
4065 * installed. It means this pte was wr-protected before being unmapped.
4066 */
4067 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
4068 {
4069 /*
4070 * Just in case there are leftover special ptes even after the region
4071 * got unregistered - we can simply clear them.
4072 */
4073 if (unlikely(!userfaultfd_wp(vmf->vma)))
4074 return pte_marker_clear(vmf);
4075
4076 return do_pte_missing(vmf);
4077 }
4078
4079 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
4080 {
4081 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
4082 unsigned long marker = pte_marker_get(entry);
4083
4084 /*
4085 * PTE markers should never be empty. If anything weird happened,
4086 * the best thing to do is to kill the process along with its mm.
4087 */
4088 if (WARN_ON_ONCE(!marker))
4089 return VM_FAULT_SIGBUS;
4090
4091 /* Higher priority than uffd-wp when data corrupted */
4092 if (marker & PTE_MARKER_POISONED)
4093 return VM_FAULT_HWPOISON;
4094
4095 /* Hitting a guard page is always a fatal condition. */
4096 if (marker & PTE_MARKER_GUARD)
4097 return VM_FAULT_SIGSEGV;
4098
4099 if (pte_marker_entry_uffd_wp(entry))
4100 return pte_marker_handle_uffd_wp(vmf);
4101
4102 /* This is an unknown pte marker */
4103 return VM_FAULT_SIGBUS;
4104 }
4105
4106 static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
4107 {
4108 struct vm_area_struct *vma = vmf->vma;
4109 struct folio *folio;
4110 swp_entry_t entry;
4111
4112 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
4113 if (!folio)
4114 return NULL;
4115
4116 entry = pte_to_swp_entry(vmf->orig_pte);
4117 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4118 GFP_KERNEL, entry)) {
4119 folio_put(folio);
4120 return NULL;
4121 }
4122
4123 return folio;
4124 }
4125
4126 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4127 static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
4128 {
4129 struct swap_info_struct *si = swp_swap_info(entry);
4130 pgoff_t offset = swp_offset(entry);
4131 int i;
4132
4133 /*
4134 * When allocating a large folio and doing swap_read_folio(), i.e. the
4135 * case where the pte being faulted on has no swapcache, we need to
4136 * ensure that none of the PTEs in the range have swapcache either;
4137 * otherwise we might read from the swap device while the content is in the swapcache.
4138 */
4139 for (i = 0; i < max_nr; i++) {
4140 if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
4141 return i;
4142 }
4143
4144 return i;
4145 }
4146
4147 /*
4148 * Check if the PTEs within a range are contiguous swap entries
4149 * and have consistent swapcache, zeromap.
4150 */
4151 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
4152 {
4153 unsigned long addr;
4154 swp_entry_t entry;
4155 int idx;
4156 pte_t pte;
4157
4158 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4159 idx = (vmf->address - addr) / PAGE_SIZE;
4160 pte = ptep_get(ptep);
4161
4162 if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
4163 return false;
4164 entry = pte_to_swp_entry(pte);
4165 if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
4166 return false;
4167
4168 /*
4169 * swap_read_folio() can't handle the case where a large folio is backed
4170 * by a mix of different backends, and those are likely corner cases.
4171 * Similar checks might be added once zswap supports large folios.
4172 */
4173 if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
4174 return false;
4175 if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
4176 return false;
4177
4178 return true;
4179 }
4180
4181 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
4182 unsigned long addr,
4183 unsigned long orders)
4184 {
4185 int order, nr;
4186
4187 order = highest_order(orders);
4188
4189 /*
4190 * To swap in a THP with nr pages, we require that its first swap_offset
4191 * is aligned with that number, as it was when the THP was swapped out.
4192 * This helps filter out most invalid entries.
4193 */
4194 while (orders) {
4195 nr = 1 << order;
4196 if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
4197 break;
4198 order = next_order(&orders, order);
4199 }
4200
4201 return orders;
4202 }
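/*
 * [Editorial worked example.] Suppose the fault address has page index
 * 0x1234 and the swap entry sits at offset 0x5678. For order 2 (nr == 4)
 * both 0x1234 % 4 and 0x5678 % 4 are 0, so that order survives the
 * filter; for order 4 (nr == 16) we get 0x1234 % 16 == 4 but
 * 0x5678 % 16 == 8, so order 4 (and every order above it) is dropped
 * by the (addr >> PAGE_SHIFT) % nr == swp_offset % nr check above.
 */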
4203
4204 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4205 {
4206 struct vm_area_struct *vma = vmf->vma;
4207 unsigned long orders;
4208 struct folio *folio;
4209 unsigned long addr;
4210 swp_entry_t entry;
4211 spinlock_t *ptl;
4212 pte_t *pte;
4213 gfp_t gfp;
4214 int order;
4215
4216 /*
4217 * If uffd is active for the vma we need per-page fault fidelity to
4218 * maintain the uffd semantics.
4219 */
4220 if (unlikely(userfaultfd_armed(vma)))
4221 goto fallback;
4222
4223 /*
4224 * A large swapped out folio could be partially or fully in zswap. We
4225 * lack handling for such cases, so fallback to swapping in order-0
4226 * folio.
4227 */
4228 if (!zswap_never_enabled())
4229 goto fallback;
4230
4231 entry = pte_to_swp_entry(vmf->orig_pte);
4232 /*
4233 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4234 * and suitable for swapping THP.
4235 */
4236 orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4237 TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4238 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4239 orders = thp_swap_suitable_orders(swp_offset(entry),
4240 vmf->address, orders);
4241
4242 if (!orders)
4243 goto fallback;
4244
4245 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4246 vmf->address & PMD_MASK, &ptl);
4247 if (unlikely(!pte))
4248 goto fallback;
4249
4250 /*
4251 * For do_swap_page, find the highest order where the aligned range is
4252 * completely swap entries with contiguous swap offsets.
4253 */
4254 order = highest_order(orders);
4255 while (orders) {
4256 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4257 if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
4258 break;
4259 order = next_order(&orders, order);
4260 }
4261
4262 pte_unmap_unlock(pte, ptl);
4263
4264 /* Try allocating the highest of the remaining orders. */
4265 gfp = vma_thp_gfp_mask(vma);
4266 while (orders) {
4267 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4268 folio = vma_alloc_folio(gfp, order, vma, addr);
4269 if (folio) {
4270 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4271 gfp, entry))
4272 return folio;
4273 count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
4274 folio_put(folio);
4275 }
4276 count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
4277 order = next_order(&orders, order);
4278 }
4279
4280 fallback:
4281 return __alloc_swap_folio(vmf);
4282 }
4283 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4284 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4285 {
4286 return __alloc_swap_folio(vmf);
4287 }
4288 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4289
4290 static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
4291
4292 /*
4293 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4294 * but allow concurrent faults), and pte mapped but not yet locked.
4295 * We return with pte unmapped and unlocked.
4296 *
4297 * We return with the mmap_lock locked or unlocked in the same cases
4298 * as does filemap_fault().
4299 */
4300 vm_fault_t do_swap_page(struct vm_fault *vmf)
4301 {
4302 struct vm_area_struct *vma = vmf->vma;
4303 struct folio *swapcache, *folio = NULL;
4304 DECLARE_WAITQUEUE(wait, current);
4305 struct page *page;
4306 struct swap_info_struct *si = NULL;
4307 rmap_t rmap_flags = RMAP_NONE;
4308 bool need_clear_cache = false;
4309 bool exclusive = false;
4310 swp_entry_t entry;
4311 pte_t pte;
4312 vm_fault_t ret = 0;
4313 void *shadow = NULL;
4314 int nr_pages;
4315 unsigned long page_idx;
4316 unsigned long address;
4317 pte_t *ptep;
4318
4319 if (!pte_unmap_same(vmf))
4320 goto out;
4321
4322 entry = pte_to_swp_entry(vmf->orig_pte);
4323 if (unlikely(non_swap_entry(entry))) {
4324 if (is_migration_entry(entry)) {
4325 migration_entry_wait(vma->vm_mm, vmf->pmd,
4326 vmf->address);
4327 } else if (is_device_exclusive_entry(entry)) {
4328 vmf->page = pfn_swap_entry_to_page(entry);
4329 ret = remove_device_exclusive_entry(vmf);
4330 } else if (is_device_private_entry(entry)) {
4331 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4332 /*
4333 * migrate_to_ram is not yet ready to operate
4334 * under VMA lock.
4335 */
4336 vma_end_read(vma);
4337 ret = VM_FAULT_RETRY;
4338 goto out;
4339 }
4340
4341 vmf->page = pfn_swap_entry_to_page(entry);
4342 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4343 vmf->address, &vmf->ptl);
4344 if (unlikely(!vmf->pte ||
4345 !pte_same(ptep_get(vmf->pte),
4346 vmf->orig_pte)))
4347 goto unlock;
4348
4349 /*
4350 * Get a page reference while we know the page can't be
4351 * freed.
4352 */
4353 get_page(vmf->page);
4354 pte_unmap_unlock(vmf->pte, vmf->ptl);
4355 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
4356 put_page(vmf->page);
4357 } else if (is_hwpoison_entry(entry)) {
4358 ret = VM_FAULT_HWPOISON;
4359 } else if (is_pte_marker_entry(entry)) {
4360 ret = handle_pte_marker(vmf);
4361 } else {
4362 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4363 ret = VM_FAULT_SIGBUS;
4364 }
4365 goto out;
4366 }
4367
4368 /* Prevent swapoff from happening to us. */
4369 si = get_swap_device(entry);
4370 if (unlikely(!si))
4371 goto out;
4372
4373 folio = swap_cache_get_folio(entry, vma, vmf->address);
4374 if (folio)
4375 page = folio_file_page(folio, swp_offset(entry));
4376 swapcache = folio;
4377
4378 if (!folio) {
4379 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
4380 __swap_count(entry) == 1) {
4381 /* skip swapcache */
4382 folio = alloc_swap_folio(vmf);
4383 if (folio) {
4384 __folio_set_locked(folio);
4385 __folio_set_swapbacked(folio);
4386
4387 nr_pages = folio_nr_pages(folio);
4388 if (folio_test_large(folio))
4389 entry.val = ALIGN_DOWN(entry.val, nr_pages);
4390 /*
4391 * Prevent parallel swapin from proceeding with
4392 * the cache flag. Otherwise, another thread
4393 * may finish swapin first, free the entry, and
4394 * swapout reusing the same entry. It's
4395 * undetectable as pte_same() returns true due
4396 * to entry reuse.
4397 */
4398 if (swapcache_prepare(entry, nr_pages)) {
4399 /*
4400 * Relax a bit to prevent rapid
4401 * repeated page faults.
4402 */
4403 add_wait_queue(&swapcache_wq, &wait);
4404 schedule_timeout_uninterruptible(1);
4405 remove_wait_queue(&swapcache_wq, &wait);
4406 goto out_page;
4407 }
4408 need_clear_cache = true;
4409
4410 mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
4411
4412 shadow = get_shadow_from_swap_cache(entry);
4413 if (shadow)
4414 workingset_refault(folio, shadow);
4415
4416 folio_add_lru(folio);
4417
4418 /* To provide entry to swap_read_folio() */
4419 folio->swap = entry;
4420 swap_read_folio(folio, NULL);
4421 folio->private = NULL;
4422 }
4423 } else {
4424 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
4425 vmf);
4426 swapcache = folio;
4427 }
4428
4429 if (!folio) {
4430 /*
4431 * Back out if somebody else faulted in this pte
4432 * while we released the pte lock.
4433 */
4434 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4435 vmf->address, &vmf->ptl);
4436 if (likely(vmf->pte &&
4437 pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4438 ret = VM_FAULT_OOM;
4439 goto unlock;
4440 }
4441
4442 /* Had to read the page from swap area: Major fault */
4443 ret = VM_FAULT_MAJOR;
4444 count_vm_event(PGMAJFAULT);
4445 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4446 page = folio_file_page(folio, swp_offset(entry));
4447 } else if (PageHWPoison(page)) {
4448 /*
4449 * hwpoisoned dirty swapcache pages are kept for killing
4450 * owner processes (which may be unknown at hwpoison time)
4451 */
4452 ret = VM_FAULT_HWPOISON;
4453 goto out_release;
4454 }
4455
4456 ret |= folio_lock_or_retry(folio, vmf);
4457 if (ret & VM_FAULT_RETRY)
4458 goto out_release;
4459
4460 if (swapcache) {
4461 /*
4462 * Make sure folio_free_swap() or swapoff did not release the
4463 * swapcache from under us. The page pin, and pte_same test
4464 * below, are not enough to exclude that. Even if it is still
4465 * swapcache, we need to check that the page's swap has not
4466 * changed.
4467 */
4468 if (unlikely(!folio_test_swapcache(folio) ||
4469 page_swap_entry(page).val != entry.val))
4470 goto out_page;
4471
4472 /*
4473 * KSM sometimes has to copy on read faults, for example, if
4474 * page->index of !PageKSM() pages would be nonlinear inside the
4475 * anon VMA -- PageKSM() is lost on actual swapout.
4476 */
4477 folio = ksm_might_need_to_copy(folio, vma, vmf->address);
4478 if (unlikely(!folio)) {
4479 ret = VM_FAULT_OOM;
4480 folio = swapcache;
4481 goto out_page;
4482 } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
4483 ret = VM_FAULT_HWPOISON;
4484 folio = swapcache;
4485 goto out_page;
4486 }
4487 if (folio != swapcache)
4488 page = folio_page(folio, 0);
4489
4490 /*
4491 * If we want to map a page that's in the swapcache writable, we
4492 * have to detect via the refcount if we're really the exclusive
4493 * owner. Try removing the extra reference from the local LRU
4494 * caches if required.
4495 */
4496 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
4497 !folio_test_ksm(folio) && !folio_test_lru(folio))
4498 lru_add_drain();
4499 }
4500
4501 folio_throttle_swaprate(folio, GFP_KERNEL);
4502
4503 /*
4504 * Back out if somebody else already faulted in this pte.
4505 */
4506 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4507 &vmf->ptl);
4508 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4509 goto out_nomap;
4510
4511 if (unlikely(!folio_test_uptodate(folio))) {
4512 ret = VM_FAULT_SIGBUS;
4513 goto out_nomap;
4514 }
4515
4516 /* allocated large folios for SWP_SYNCHRONOUS_IO */
4517 if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
4518 unsigned long nr = folio_nr_pages(folio);
4519 unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
4520 unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
4521 pte_t *folio_ptep = vmf->pte - idx;
4522 pte_t folio_pte = ptep_get(folio_ptep);
4523
4524 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4525 swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4526 goto out_nomap;
4527
4528 page_idx = idx;
4529 address = folio_start;
4530 ptep = folio_ptep;
4531 goto check_folio;
4532 }
4533
4534 nr_pages = 1;
4535 page_idx = 0;
4536 address = vmf->address;
4537 ptep = vmf->pte;
4538 if (folio_test_large(folio) && folio_test_swapcache(folio)) {
4539 int nr = folio_nr_pages(folio);
4540 unsigned long idx = folio_page_idx(folio, page);
4541 unsigned long folio_start = address - idx * PAGE_SIZE;
4542 unsigned long folio_end = folio_start + nr * PAGE_SIZE;
4543 pte_t *folio_ptep;
4544 pte_t folio_pte;
4545
4546 if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
4547 goto check_folio;
4548 if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
4549 goto check_folio;
4550
4551 folio_ptep = vmf->pte - idx;
4552 folio_pte = ptep_get(folio_ptep);
4553 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4554 swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4555 goto check_folio;
4556
4557 page_idx = idx;
4558 address = folio_start;
4559 ptep = folio_ptep;
4560 nr_pages = nr;
4561 entry = folio->swap;
4562 page = &folio->page;
4563 }
4564
4565 check_folio:
4566 /*
4567 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4568 * must never point at an anonymous page in the swapcache that is
4569 * PG_anon_exclusive. Sanity check that this holds and especially, that
4570 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4571 * check after taking the PT lock and making sure that nobody
4572 * concurrently faulted in this page and set PG_anon_exclusive.
4573 */
4574 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
4575 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
4576
4577 /*
4578 * Check under PT lock (to protect against concurrent fork() sharing
4579 * the swap entry concurrently) for certainly exclusive pages.
4580 */
4581 if (!folio_test_ksm(folio)) {
4582 exclusive = pte_swp_exclusive(vmf->orig_pte);
4583 if (folio != swapcache) {
4584 /*
4585 * We have a fresh page that is not exposed to the
4586 * swapcache -> certainly exclusive.
4587 */
4588 exclusive = true;
4589 } else if (exclusive && folio_test_writeback(folio) &&
4590 data_race(si->flags & SWP_STABLE_WRITES)) {
4591 /*
4592 * This is tricky: not all swap backends support
4593 * concurrent page modifications while under writeback.
4594 *
4595 * So if we stumble over such a page in the swapcache
4596 * we must not set the page exclusive, otherwise we can
4597 * map it writable without further checks and modify it
4598 * while still under writeback.
4599 *
4600 * For these problematic swap backends, simply drop the
4601 * exclusive marker: this is perfectly fine as we start
4602 * writeback only if we fully unmapped the page and
4603 * there are no unexpected references on the page after
4604 * unmapping succeeded. After fully unmapped, no
4605 * further GUP references (FOLL_GET and FOLL_PIN) can
4606 * appear, so dropping the exclusive marker and mapping
4607 * it only R/O is fine.
4608 */
4609 exclusive = false;
4610 }
4611 }
4612
4613 /*
4614 * Some architectures may have to restore extra metadata to the page
4615 * when reading from swap. This metadata may be indexed by swap entry
4616 * so this must be called before swap_free().
4617 */
4618 arch_swap_restore(folio_swap(entry, folio), folio);
4619
4620 /*
4621 * Remove the swap entry and conditionally try to free up the swapcache.
4622 * We're already holding a reference on the page but haven't mapped it
4623 * yet.
4624 */
4625 swap_free_nr(entry, nr_pages);
4626 if (should_try_to_free_swap(folio, vma, vmf->flags))
4627 folio_free_swap(folio);
4628
4629 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4630 add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
4631 pte = mk_pte(page, vma->vm_page_prot);
4632 if (pte_swp_soft_dirty(vmf->orig_pte))
4633 pte = pte_mksoft_dirty(pte);
4634 if (pte_swp_uffd_wp(vmf->orig_pte))
4635 pte = pte_mkuffd_wp(pte);
4636
4637 /*
4638 * Same logic as in do_wp_page(); however, optimize for pages that are
4639 * certainly not shared either because we just allocated them without
4640 * exposing them to the swapcache or because the swap entry indicates
4641 * exclusivity.
4642 */
4643 if (!folio_test_ksm(folio) &&
4644 (exclusive || folio_ref_count(folio) == 1)) {
4645 if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
4646 !pte_needs_soft_dirty_wp(vma, pte)) {
4647 pte = pte_mkwrite(pte, vma);
4648 if (vmf->flags & FAULT_FLAG_WRITE) {
4649 pte = pte_mkdirty(pte);
4650 vmf->flags &= ~FAULT_FLAG_WRITE;
4651 }
4652 }
4653 rmap_flags |= RMAP_EXCLUSIVE;
4654 }
4655 folio_ref_add(folio, nr_pages - 1);
4656 flush_icache_pages(vma, page, nr_pages);
4657 vmf->orig_pte = pte_advance_pfn(pte, page_idx);
4658
4659 /* ksm created a completely new copy */
4660 if (unlikely(folio != swapcache && swapcache)) {
4661 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
4662 folio_add_lru_vma(folio, vma);
4663 } else if (!folio_test_anon(folio)) {
4664 /*
4665 * We currently only expect small !anon folios which are either
4666 * fully exclusive or fully shared, or newly allocated large
4667 * folios which are fully exclusive. If we ever get large
4668 * folios within swapcache here, we have to be careful.
4669 */
4670 VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio));
4671 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
4672 folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
4673 } else {
4674 folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
4675 rmap_flags);
4676 }
4677
4678 VM_BUG_ON(!folio_test_anon(folio) ||
4679 (pte_write(pte) && !PageAnonExclusive(page)));
4680 set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
4681 arch_do_swap_page_nr(vma->vm_mm, vma, address,
4682 pte, pte, nr_pages);
4683
4684 folio_unlock(folio);
4685 if (folio != swapcache && swapcache) {
4686 /*
4687 * Hold the lock to prevent the swap entry from being reused
4688 * until we take the PT lock for the pte_same() check
4689 * (to avoid false positives from pte_same). For
4690 * further safety release the lock after the swap_free
4691 * so that the swap count won't change under a
4692 * parallel locked swapcache.
4693 */
4694 folio_unlock(swapcache);
4695 folio_put(swapcache);
4696 }
4697
4698 if (vmf->flags & FAULT_FLAG_WRITE) {
4699 ret |= do_wp_page(vmf);
4700 if (ret & VM_FAULT_ERROR)
4701 ret &= VM_FAULT_ERROR;
4702 goto out;
4703 }
4704
4705 /* No need to invalidate - it was non-present before */
4706 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
4707 unlock:
4708 if (vmf->pte)
4709 pte_unmap_unlock(vmf->pte, vmf->ptl);
4710 out:
4711 /* Clear the swap cache pin for direct swapin after PTL unlock */
4712 if (need_clear_cache) {
4713 swapcache_clear(si, entry, nr_pages);
4714 if (waitqueue_active(&swapcache_wq))
4715 wake_up(&swapcache_wq);
4716 }
4717 if (si)
4718 put_swap_device(si);
4719 return ret;
4720 out_nomap:
4721 if (vmf->pte)
4722 pte_unmap_unlock(vmf->pte, vmf->ptl);
4723 out_page:
4724 folio_unlock(folio);
4725 out_release:
4726 folio_put(folio);
4727 if (folio != swapcache && swapcache) {
4728 folio_unlock(swapcache);
4729 folio_put(swapcache);
4730 }
4731 if (need_clear_cache) {
4732 swapcache_clear(si, entry, nr_pages);
4733 if (waitqueue_active(&swapcache_wq))
4734 wake_up(&swapcache_wq);
4735 }
4736 if (si)
4737 put_swap_device(si);
4738 return ret;
4739 }
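/*
 * [Editorial illustration, not kernel code.] Driving this path from
 * userspace on a system with swap enabled: MADV_PAGEOUT evicts the page,
 * and the subsequent read faults it back in through do_swap_page()
 * (counted as a major fault if it has to be read from the swap device).
 *
 *	#include <sys/mman.h>
 *
 *	int swapin_demo(void)
 *	{
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		p[0] = 42;
 *		madvise(p, 4096, MADV_PAGEOUT);	// may swap the page out
 *		return p[0];			// swap-in via do_swap_page()
 *	}
 */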
4740
4741 static bool pte_range_none(pte_t *pte, int nr_pages)
4742 {
4743 int i;
4744
4745 for (i = 0; i < nr_pages; i++) {
4746 if (!pte_none(ptep_get_lockless(pte + i)))
4747 return false;
4748 }
4749
4750 return true;
4751 }
4752
4753 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
4754 {
4755 struct vm_area_struct *vma = vmf->vma;
4756 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4757 unsigned long orders;
4758 struct folio *folio;
4759 unsigned long addr;
4760 pte_t *pte;
4761 gfp_t gfp;
4762 int order;
4763
4764 /*
4765 * If uffd is active for the vma we need per-page fault fidelity to
4766 * maintain the uffd semantics.
4767 */
4768 if (unlikely(userfaultfd_armed(vma)))
4769 goto fallback;
4770
4771 /*
4772 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4773 * for this vma. Then filter out the orders that can't be allocated over
4774 * the faulting address and still be fully contained in the vma.
4775 */
4776 orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4777 TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4778 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4779
4780 if (!orders)
4781 goto fallback;
4782
4783 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
4784 if (!pte)
4785 return ERR_PTR(-EAGAIN);
4786
4787 /*
4788 * Find the highest order where the aligned range is completely
4789 * pte_none(). Note that all remaining orders will be completely
4790 * pte_none().
4791 */
4792 order = highest_order(orders);
4793 while (orders) {
4794 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4795 if (pte_range_none(pte + pte_index(addr), 1 << order))
4796 break;
4797 order = next_order(&orders, order);
4798 }
4799
4800 pte_unmap(pte);
4801
4802 if (!orders)
4803 goto fallback;
4804
4805 /* Try allocating the highest of the remaining orders. */
4806 gfp = vma_thp_gfp_mask(vma);
4807 while (orders) {
4808 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4809 folio = vma_alloc_folio(gfp, order, vma, addr);
4810 if (folio) {
4811 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
4812 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
4813 folio_put(folio);
4814 goto next;
4815 }
4816 folio_throttle_swaprate(folio, gfp);
4817 /*
4818 * When a folio is not zeroed during allocation
4819 * (__GFP_ZERO not used) or user folios require special
4820 * handling, folio_zero_user() is used to make sure
4821 * that the page corresponding to the faulting address
4822 * will be hot in the cache after zeroing.
4823 */
4824 if (user_alloc_needs_zeroing())
4825 folio_zero_user(folio, vmf->address);
4826 return folio;
4827 }
4828 next:
4829 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
4830 order = next_order(&orders, order);
4831 }
4832
4833 fallback:
4834 #endif
4835 return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
4836 }
4837
4838 /*
4839 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4840 * but allow concurrent faults), and pte mapped but not yet locked.
4841 * We return with mmap_lock still held, but pte unmapped and unlocked.
4842 */
4843 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4844 {
4845 struct vm_area_struct *vma = vmf->vma;
4846 unsigned long addr = vmf->address;
4847 struct folio *folio;
4848 vm_fault_t ret = 0;
4849 int nr_pages = 1;
4850 pte_t entry;
4851
4852 /* File mapping without ->vm_ops ? */
4853 if (vma->vm_flags & VM_SHARED)
4854 return VM_FAULT_SIGBUS;
4855
4856 /*
4857 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4858 * be distinguished from a transient failure of pte_offset_map().
4859 */
4860 if (pte_alloc(vma->vm_mm, vmf->pmd))
4861 return VM_FAULT_OOM;
4862
4863 /* Use the zero-page for reads */
4864 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4865 !mm_forbids_zeropage(vma->vm_mm)) {
4866 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4867 vma->vm_page_prot));
4868 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4869 vmf->address, &vmf->ptl);
4870 if (!vmf->pte)
4871 goto unlock;
4872 if (vmf_pte_changed(vmf)) {
4873 update_mmu_tlb(vma, vmf->address, vmf->pte);
4874 goto unlock;
4875 }
4876 ret = check_stable_address_space(vma->vm_mm);
4877 if (ret)
4878 goto unlock;
4879 /* Deliver the page fault to userland, check inside PT lock */
4880 if (userfaultfd_missing(vma)) {
4881 pte_unmap_unlock(vmf->pte, vmf->ptl);
4882 return handle_userfault(vmf, VM_UFFD_MISSING);
4883 }
4884 goto setpte;
4885 }
4886
4887 /* Allocate our own private page. */
4888 ret = vmf_anon_prepare(vmf);
4889 if (ret)
4890 return ret;
4891 /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
4892 folio = alloc_anon_folio(vmf);
4893 if (IS_ERR(folio))
4894 return 0;
4895 if (!folio)
4896 goto oom;
4897
4898 nr_pages = folio_nr_pages(folio);
4899 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4900
4901 /*
4902 * The memory barrier inside __folio_mark_uptodate makes sure that
4903 * preceding stores to the page contents become visible before
4904 * the set_pte_at() write.
4905 */
4906 __folio_mark_uptodate(folio);
4907
4908 entry = mk_pte(&folio->page, vma->vm_page_prot);
4909 entry = pte_sw_mkyoung(entry);
4910 if (vma->vm_flags & VM_WRITE)
4911 entry = pte_mkwrite(pte_mkdirty(entry), vma);
4912
4913 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
4914 if (!vmf->pte)
4915 goto release;
4916 if (nr_pages == 1 && vmf_pte_changed(vmf)) {
4917 update_mmu_tlb(vma, addr, vmf->pte);
4918 goto release;
4919 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
4920 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
4921 goto release;
4922 }
4923
4924 ret = check_stable_address_space(vma->vm_mm);
4925 if (ret)
4926 goto release;
4927
4928 /* Deliver the page fault to userland, check inside PT lock */
4929 if (userfaultfd_missing(vma)) {
4930 pte_unmap_unlock(vmf->pte, vmf->ptl);
4931 folio_put(folio);
4932 return handle_userfault(vmf, VM_UFFD_MISSING);
4933 }
4934
4935 folio_ref_add(folio, nr_pages - 1);
4936 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4937 count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
4938 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
4939 folio_add_lru_vma(folio, vma);
4940 setpte:
4941 if (vmf_orig_pte_uffd_wp(vmf))
4942 entry = pte_mkuffd_wp(entry);
4943 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
4944
4945 /* No need to invalidate - it was non-present before */
4946 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
4947 unlock:
4948 if (vmf->pte)
4949 pte_unmap_unlock(vmf->pte, vmf->ptl);
4950 return ret;
4951 release:
4952 folio_put(folio);
4953 goto unlock;
4954 oom:
4955 return VM_FAULT_OOM;
4956 }
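/*
 * [Editorial illustration, not kernel code.] The two flavours of this
 * handler as seen from userspace: the first, read-only access to an
 * untouched anonymous page is backed by the shared zero-page and
 * allocates nothing; the later write faults again and gets a private
 * page (see "Use the zero-page for reads" above).
 *
 *	#include <sys/mman.h>
 *
 *	int zeropage_demo(void)
 *	{
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		char c = p[0];	// read fault: zero-page, c == 0
 *
 *		p[0] = 1;	// write fault: allocate a real page
 *		return c;
 *	}
 */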
4957
4958 /*
4959 * The mmap_lock must have been held on entry, and may have been
4960 * released depending on flags and vma->vm_ops->fault() return value.
4961 * See filemap_fault() and __folio_lock_or_retry().
4962 */
4963 static vm_fault_t __do_fault(struct vm_fault *vmf)
4964 {
4965 struct vm_area_struct *vma = vmf->vma;
4966 struct folio *folio;
4967 vm_fault_t ret;
4968
4969 /*
4970 * Preallocate pte before we take page_lock because this might lead to
4971 * deadlocks for memcg reclaim which waits for pages under writeback:
4972 * lock_page(A)
4973 * SetPageWriteback(A)
4974 * unlock_page(A)
4975 * lock_page(B)
4976 * lock_page(B)
4977 * pte_alloc_one
4978 * shrink_folio_list
4979 * wait_on_page_writeback(A)
4980 * SetPageWriteback(B)
4981 * unlock_page(B)
4982 * # flush A, B to clear the writeback
4983 */
4984 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4985 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4986 if (!vmf->prealloc_pte)
4987 return VM_FAULT_OOM;
4988 }
4989
4990 ret = vma->vm_ops->fault(vmf);
4991 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4992 VM_FAULT_DONE_COW)))
4993 return ret;
4994
4995 folio = page_folio(vmf->page);
4996 if (unlikely(PageHWPoison(vmf->page))) {
4997 vm_fault_t poisonret = VM_FAULT_HWPOISON;
4998 if (ret & VM_FAULT_LOCKED) {
4999 if (page_mapped(vmf->page))
5000 unmap_mapping_folio(folio);
5001 /* Retry if a clean folio was removed from the cache. */
5002 if (mapping_evict_folio(folio->mapping, folio))
5003 poisonret = VM_FAULT_NOPAGE;
5004 folio_unlock(folio);
5005 }
5006 folio_put(folio);
5007 vmf->page = NULL;
5008 return poisonret;
5009 }
5010
5011 if (unlikely(!(ret & VM_FAULT_LOCKED)))
5012 folio_lock(folio);
5013 else
5014 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
5015
5016 return ret;
5017 }
5018
5019 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5020 static void deposit_prealloc_pte(struct vm_fault *vmf)
5021 {
5022 struct vm_area_struct *vma = vmf->vma;
5023
5024 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
5025 /*
5026 * We are going to consume the prealloc table,
5027 * count that as nr_ptes.
5028 */
5029 mm_inc_nr_ptes(vma->vm_mm);
5030 vmf->prealloc_pte = NULL;
5031 }
5032
5033 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5034 {
5035 struct folio *folio = page_folio(page);
5036 struct vm_area_struct *vma = vmf->vma;
5037 bool write = vmf->flags & FAULT_FLAG_WRITE;
5038 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5039 pmd_t entry;
5040 vm_fault_t ret = VM_FAULT_FALLBACK;
5041
5042 /*
5043 * It is too late to allocate a small folio, we already have a large
5044 * folio in the pagecache: especially s390 KVM cannot tolerate any
5045 * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any
5046 * PMD mappings if THPs are disabled.
5047 */
5048 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags))
5049 return ret;
5050
5051 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
5052 return ret;
5053
5054 if (folio_order(folio) != HPAGE_PMD_ORDER)
5055 return ret;
5056 page = &folio->page;
5057
5058 /*
5059 * Just back off if any subpage of a THP is corrupted; otherwise
5060 * the corrupted page may be silently mapped by PMD and escape the
5061 * check. Such a THP can only be PTE-mapped. Access to
5062 * the corrupted subpage should trigger SIGBUS as expected.
5063 */
5064 if (unlikely(folio_test_has_hwpoisoned(folio)))
5065 return ret;
5066
5067 /*
5068 * Archs like ppc64 need additional space to store information
5069 * related to pte entry. Use the preallocated table for that.
5070 */
5071 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
5072 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5073 if (!vmf->prealloc_pte)
5074 return VM_FAULT_OOM;
5075 }
5076
5077 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
5078 if (unlikely(!pmd_none(*vmf->pmd)))
5079 goto out;
5080
5081 flush_icache_pages(vma, page, HPAGE_PMD_NR);
5082
5083 entry = mk_huge_pmd(page, vma->vm_page_prot);
5084 if (write)
5085 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
5086
5087 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
5088 folio_add_file_rmap_pmd(folio, page, vma);
5089
5090 /*
5091 * deposit and withdraw with pmd lock held
5092 */
5093 if (arch_needs_pgtable_deposit())
5094 deposit_prealloc_pte(vmf);
5095
5096 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
5097
5098 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
5099
5100 /* fault is handled */
5101 ret = 0;
5102 count_vm_event(THP_FILE_MAPPED);
5103 out:
5104 spin_unlock(vmf->ptl);
5105 return ret;
5106 }
5107 #else
5108 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5109 {
5110 return VM_FAULT_FALLBACK;
5111 }
5112 #endif
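
/*
 * Worked example of the do_set_pmd() preconditions, assuming x86-64
 * (PMD_SIZE = 2M, HPAGE_PMD_ORDER = 9): a fault at 0x7f1234567000
 * yields haddr = 0x7f1234400000. The PMD mapping is installed only if
 * THPs are enabled, the VMA covers [haddr, haddr + 2M), the folio is
 * exactly order 9 and none of its subpages is hwpoisoned; otherwise
 * VM_FAULT_FALLBACK is returned and the folio gets PTE-mapped.
 */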
5113
5114 /**
5115 * set_pte_range - Set a range of PTEs to point to pages in a folio.
5116 * @vmf: Fault description.
5117 * @folio: The folio that contains @page.
5118 * @page: The first page to create a PTE for.
5119 * @nr: The number of PTEs to create.
5120 * @addr: The first address to create a PTE for.
5121 */
5122 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
5123 struct page *page, unsigned int nr, unsigned long addr)
5124 {
5125 struct vm_area_struct *vma = vmf->vma;
5126 bool write = vmf->flags & FAULT_FLAG_WRITE;
5127 bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
5128 pte_t entry;
5129
5130 flush_icache_pages(vma, page, nr);
5131 entry = mk_pte(page, vma->vm_page_prot);
5132
5133 if (prefault && arch_wants_old_prefaulted_pte())
5134 entry = pte_mkold(entry);
5135 else
5136 entry = pte_sw_mkyoung(entry);
5137
5138 if (write)
5139 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5140 if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
5141 entry = pte_mkuffd_wp(entry);
5142 /* copy-on-write page */
5143 if (write && !(vma->vm_flags & VM_SHARED)) {
5144 VM_BUG_ON_FOLIO(nr != 1, folio);
5145 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5146 folio_add_lru_vma(folio, vma);
5147 } else {
5148 folio_add_file_rmap_ptes(folio, page, nr, vma);
5149 }
5150 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
5151
5152 /* no need to invalidate: a not-present page won't be cached */
5153 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
5154 }
5155
5156 static bool vmf_pte_changed(struct vm_fault *vmf)
5157 {
5158 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
5159 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
5160
5161 return !pte_none(ptep_get(vmf->pte));
5162 }
5163
5164 /**
5165 * finish_fault - finish page fault once we have prepared the page to fault
5166 *
5167 * @vmf: structure describing the fault
5168 *
5169 * This function handles all that is needed to finish a page fault once the
5170 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5171 * given page, adds reverse page mapping, handles memcg charges and LRU
5172 * addition.
5173 *
5174 * The function expects the page to be locked and on success it consumes a
5175 * reference on the page being mapped (for the PTE which maps it).
5176 *
5177 * Return: %0 on success, %VM_FAULT_ code in case of error.
5178 */
5179 vm_fault_t finish_fault(struct vm_fault *vmf)
5180 {
5181 struct vm_area_struct *vma = vmf->vma;
5182 struct page *page;
5183 struct folio *folio;
5184 vm_fault_t ret;
5185 bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
5186 !(vma->vm_flags & VM_SHARED);
5187 int type, nr_pages;
5188 unsigned long addr;
5189 bool needs_fallback = false;
5190
5191 fallback:
5192 addr = vmf->address;
5193
5194 /* Did we COW the page? */
5195 if (is_cow)
5196 page = vmf->cow_page;
5197 else
5198 page = vmf->page;
5199
5200 /*
5201 * check even for read faults because we might have lost our CoWed
5202 * page
5203 */
5204 if (!(vma->vm_flags & VM_SHARED)) {
5205 ret = check_stable_address_space(vma->vm_mm);
5206 if (ret)
5207 return ret;
5208 }
5209
5210 if (pmd_none(*vmf->pmd)) {
5211 if (PageTransCompound(page)) {
5212 ret = do_set_pmd(vmf, page);
5213 if (ret != VM_FAULT_FALLBACK)
5214 return ret;
5215 }
5216
5217 if (vmf->prealloc_pte)
5218 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
5219 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
5220 return VM_FAULT_OOM;
5221 }
5222
5223 folio = page_folio(page);
5224 nr_pages = folio_nr_pages(folio);
5225
5226 /*
5227 * Use a per-page fault to maintain the uffd semantics; the same
5228 * approach also applies to non-anonymous-shmem faults, to avoid
5229 * inflating the process's RSS.
5230 */
5231 if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) ||
5232 unlikely(needs_fallback)) {
5233 nr_pages = 1;
5234 } else if (nr_pages > 1) {
5235 pgoff_t idx = folio_page_idx(folio, page);
5236 /* The page offset of vmf->address within the VMA. */
5237 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5238 /* The index of the entry in the pagetable for fault page. */
5239 pgoff_t pte_off = pte_index(vmf->address);
5240
5241 /*
5242 * Fall back to a per-page fault if the folio in the page cache
5243 * extends beyond the VMA limits or the PMD page table limits.
5244 */
5245 if (unlikely(vma_off < idx ||
5246 vma_off + (nr_pages - idx) > vma_pages(vma) ||
5247 pte_off < idx ||
5248 pte_off + (nr_pages - idx) > PTRS_PER_PTE)) {
5249 nr_pages = 1;
5250 } else {
5251 /* Now we can set mappings for the whole large folio. */
5252 addr = vmf->address - idx * PAGE_SIZE;
5253 page = &folio->page;
5254 }
5255 }
5256
5257 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5258 addr, &vmf->ptl);
5259 if (!vmf->pte)
5260 return VM_FAULT_NOPAGE;
5261
5262 /* Re-check under ptl */
5263 if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
5264 update_mmu_tlb(vma, addr, vmf->pte);
5265 ret = VM_FAULT_NOPAGE;
5266 goto unlock;
5267 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5268 needs_fallback = true;
5269 pte_unmap_unlock(vmf->pte, vmf->ptl);
5270 goto fallback;
5271 }
5272
5273 folio_ref_add(folio, nr_pages - 1);
5274 set_pte_range(vmf, folio, page, nr_pages, addr);
5275 type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
5276 add_mm_counter(vma->vm_mm, type, nr_pages);
5277 ret = 0;
5278
5279 unlock:
5280 pte_unmap_unlock(vmf->pte, vmf->ptl);
5281 return ret;
5282 }
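
/*
 * Worked example of the large-folio check above, assuming x86-64
 * (PTRS_PER_PTE = 512) and a VMA large enough for the folio: an
 * order-2 folio has nr_pages = 4, and a fault on its third page gives
 * idx = 2. With pte_off = 510 the test
 * pte_off + (nr_pages - idx) = 512 <= PTRS_PER_PTE passes, so all four
 * pages are mapped at once starting at vmf->address - 2 * PAGE_SIZE.
 * With pte_off = 511 the folio would cross the page table, so only the
 * single faulting page is mapped instead.
 */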
5283
5284 static unsigned long fault_around_pages __read_mostly =
5285 65536 >> PAGE_SHIFT;
5286
5287 #ifdef CONFIG_DEBUG_FS
5288 static int fault_around_bytes_get(void *data, u64 *val)
5289 {
5290 *val = fault_around_pages << PAGE_SHIFT;
5291 return 0;
5292 }
5293
5294 /*
5295 * fault_around_bytes must be rounded down to a power-of-two number of
5296 * pages, as that is what do_fault_around() expects to see.
5297 */
5298 static int fault_around_bytes_set(void *data, u64 val)
5299 {
5300 if (val / PAGE_SIZE > PTRS_PER_PTE)
5301 return -EINVAL;
5302
5303 /*
5304 * The minimum value is 1 page, however this results in no fault-around
5305 * at all. See should_fault_around().
5306 */
5307 val = max(val, PAGE_SIZE);
5308 fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
5309
5310 return 0;
5311 }
5312 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5313 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5314
5315 static int __init fault_around_debugfs(void)
5316 {
5317 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
5318 &fault_around_bytes_fops);
5319 return 0;
5320 }
5321 late_initcall(fault_around_debugfs);
5322 #endif
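
/*
 * Example: tuning fault-around from userspace via the debugfs file
 * created above (assuming debugfs is mounted at /sys/kernel/debug and
 * 4K pages); values are rounded down to a power-of-two number of pages:
 *
 *	# map up to 16 pages (64K) around each read fault
 *	echo 65536 > /sys/kernel/debug/fault_around_bytes
 *	# a single page disables fault-around entirely
 *	echo 4096 > /sys/kernel/debug/fault_around_bytes
 */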
5323
5324 /*
5325 * do_fault_around() tries to map a few pages around the fault address. The hope
5326 * is that the pages will be needed soon and this will lower the number of
5327 * faults to handle.
5328 *
5329 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5330 * not ready to be mapped: not up-to-date, locked, etc.
5331 *
5332 * This function doesn't cross VMA or page table boundaries, in order to call
5333 * map_pages() and acquire a PTE lock only once.
5334 *
5335 * fault_around_pages defines how many pages we'll try to map.
5336 * do_fault_around() expects it to be set to a power of two less than or equal
5337 * to PTRS_PER_PTE.
5338 *
5339 * The virtual address of the area that we map is naturally aligned to
5340 * fault_around_pages * PAGE_SIZE rounded down to the machine page size
5341 * (and therefore to page order). This way it's easier to guarantee
5342 * that we don't cross page table boundaries.
5343 */
5344 static vm_fault_t do_fault_around(struct vm_fault *vmf)
5345 {
5346 pgoff_t nr_pages = READ_ONCE(fault_around_pages);
5347 pgoff_t pte_off = pte_index(vmf->address);
5348 /* The page offset of vmf->address within the VMA. */
5349 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5350 pgoff_t from_pte, to_pte;
5351 vm_fault_t ret;
5352
5353 /* The PTE offset of the start address, clamped to the VMA. */
5354 from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
5355 pte_off - min(pte_off, vma_off));
5356
5357 /* The PTE offset of the end address, clamped to the VMA and PTE. */
5358 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
5359 pte_off + vma_pages(vmf->vma) - vma_off) - 1;
5360
5361 if (pmd_none(*vmf->pmd)) {
5362 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
5363 if (!vmf->prealloc_pte)
5364 return VM_FAULT_OOM;
5365 }
5366
5367 rcu_read_lock();
5368 ret = vmf->vma->vm_ops->map_pages(vmf,
5369 vmf->pgoff + from_pte - pte_off,
5370 vmf->pgoff + to_pte - pte_off);
5371 rcu_read_unlock();
5372
5373 return ret;
5374 }
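
/*
 * Worked example of the window computed above, with 4K pages,
 * PTRS_PER_PTE = 512 and fault_around_pages = 16: for a fault with
 * pte_off = 100 in a VMA large enough that vma_off >= pte_off,
 * from_pte = ALIGN_DOWN(100, 16) = 96 and to_pte = 96 + 16 - 1 = 111,
 * so ->map_pages() is asked to fill the sixteen page table slots
 * 96..111 around the faulting entry.
 */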
5375
5376 /* Return true if we should do read fault-around, false otherwise */
5377 static inline bool should_fault_around(struct vm_fault *vmf)
5378 {
5379 /* No ->map_pages? No way to fault around... */
5380 if (!vmf->vma->vm_ops->map_pages)
5381 return false;
5382
5383 if (uffd_disable_fault_around(vmf->vma))
5384 return false;
5385
5386 /* A single page implies no faulting 'around' at all. */
5387 return fault_around_pages > 1;
5388 }
5389
5390 static vm_fault_t do_read_fault(struct vm_fault *vmf)
5391 {
5392 vm_fault_t ret = 0;
5393 struct folio *folio;
5394
5395 /*
5396 * Let's call ->map_pages() first and use ->fault() as fallback
5397 * if the page at the offset is not ready to be mapped (cold cache
5398 * or something).
5399 */
5400 if (should_fault_around(vmf)) {
5401 ret = do_fault_around(vmf);
5402 if (ret)
5403 return ret;
5404 }
5405
5406 ret = vmf_can_call_fault(vmf);
5407 if (ret)
5408 return ret;
5409
5410 ret = __do_fault(vmf);
5411 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5412 return ret;
5413
5414 ret |= finish_fault(vmf);
5415 folio = page_folio(vmf->page);
5416 folio_unlock(folio);
5417 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5418 folio_put(folio);
5419 return ret;
5420 }
5421
5422 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
5423 {
5424 struct vm_area_struct *vma = vmf->vma;
5425 struct folio *folio;
5426 vm_fault_t ret;
5427
5428 ret = vmf_can_call_fault(vmf);
5429 if (!ret)
5430 ret = vmf_anon_prepare(vmf);
5431 if (ret)
5432 return ret;
5433
5434 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
5435 if (!folio)
5436 return VM_FAULT_OOM;
5437
5438 vmf->cow_page = &folio->page;
5439
5440 ret = __do_fault(vmf);
5441 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5442 goto uncharge_out;
5443 if (ret & VM_FAULT_DONE_COW)
5444 return ret;
5445
5446 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
5447 ret = VM_FAULT_HWPOISON;
5448 goto unlock;
5449 }
5450 __folio_mark_uptodate(folio);
5451
5452 ret |= finish_fault(vmf);
5453 unlock:
5454 unlock_page(vmf->page);
5455 put_page(vmf->page);
5456 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5457 goto uncharge_out;
5458 return ret;
5459 uncharge_out:
5460 folio_put(folio);
5461 return ret;
5462 }
5463
5464 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5465 {
5466 struct vm_area_struct *vma = vmf->vma;
5467 vm_fault_t ret, tmp;
5468 struct folio *folio;
5469
5470 ret = vmf_can_call_fault(vmf);
5471 if (ret)
5472 return ret;
5473
5474 ret = __do_fault(vmf);
5475 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5476 return ret;
5477
5478 folio = page_folio(vmf->page);
5479
5480 /*
5481 * Check if the backing address space wants to know that the page is
5482 * about to become writable
5483 */
5484 if (vma->vm_ops->page_mkwrite) {
5485 folio_unlock(folio);
5486 tmp = do_page_mkwrite(vmf, folio);
5487 if (unlikely(!tmp ||
5488 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5489 folio_put(folio);
5490 return tmp;
5491 }
5492 }
5493
5494 ret |= finish_fault(vmf);
5495 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5496 VM_FAULT_RETRY))) {
5497 folio_unlock(folio);
5498 folio_put(folio);
5499 return ret;
5500 }
5501
5502 ret |= fault_dirty_shared_page(vmf);
5503 return ret;
5504 }
5505
5506 /*
5507 * We enter with non-exclusive mmap_lock (to exclude vma changes,
5508 * but allow concurrent faults).
5509 * The mmap_lock may have been released depending on flags and our
5510 * return value. See filemap_fault() and __folio_lock_or_retry().
5511 * If mmap_lock is released, vma may become invalid (for example
5512 * by other thread calling munmap()).
5513 */
5514 static vm_fault_t do_fault(struct vm_fault *vmf)
5515 {
5516 struct vm_area_struct *vma = vmf->vma;
5517 struct mm_struct *vm_mm = vma->vm_mm;
5518 vm_fault_t ret;
5519
5520 /*
5521 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
5522 */
5523 if (!vma->vm_ops->fault) {
5524 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
5525 vmf->address, &vmf->ptl);
5526 if (unlikely(!vmf->pte))
5527 ret = VM_FAULT_SIGBUS;
5528 else {
5529 /*
5530 * Make sure this is not a temporary clearing of the pte
5531 * by holding the ptl and checking again. An R/M/W update
5532 * of the pte involves taking the ptl, clearing the pte so
5533 * that there is no concurrent modification by hardware,
5534 * and then writing the updated value.
5535 */
5536 if (unlikely(pte_none(ptep_get(vmf->pte))))
5537 ret = VM_FAULT_SIGBUS;
5538 else
5539 ret = VM_FAULT_NOPAGE;
5540
5541 pte_unmap_unlock(vmf->pte, vmf->ptl);
5542 }
5543 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
5544 ret = do_read_fault(vmf);
5545 else if (!(vma->vm_flags & VM_SHARED))
5546 ret = do_cow_fault(vmf);
5547 else
5548 ret = do_shared_fault(vmf);
5549
5550 /* preallocated pagetable is unused: free it */
5551 if (vmf->prealloc_pte) {
5552 pte_free(vm_mm, vmf->prealloc_pte);
5553 vmf->prealloc_pte = NULL;
5554 }
5555 return ret;
5556 }
5557
5558 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
5559 unsigned long addr, int *flags,
5560 bool writable, int *last_cpupid)
5561 {
5562 struct vm_area_struct *vma = vmf->vma;
5563
5564 /*
5565 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5566 * much anyway since they can be in shared cache state. This misses
5567 * the case where a mapping is writable but the process never writes
5568 * to it but pte_write gets cleared during protection updates and
5569 * pte_dirty has unpredictable behaviour between PTE scan updates,
5570 * background writeback, dirty balancing and application behaviour.
5571 */
5572 if (!writable)
5573 *flags |= TNF_NO_GROUP;
5574
5575 /*
5576 * Flag if the folio is shared between multiple address spaces. This
5577 * is later used when determining whether to group tasks together
5578 */
5579 if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
5580 *flags |= TNF_SHARED;
5581 /*
5582 * In memory tiering mode, the cpupid of a slow-memory page is
5583 * used to record the page access time, so use the default value.
5584 */
5585 if (folio_use_access_time(folio))
5586 *last_cpupid = (-1 & LAST_CPUPID_MASK);
5587 else
5588 *last_cpupid = folio_last_cpupid(folio);
5589
5590 /* Record the current PID accessing the VMA */
5591 vma_set_access_pid_bit(vma);
5592
5593 count_vm_numa_event(NUMA_HINT_FAULTS);
5594 #ifdef CONFIG_NUMA_BALANCING
5595 count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
5596 #endif
5597 if (folio_nid(folio) == numa_node_id()) {
5598 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5599 *flags |= TNF_FAULT_LOCAL;
5600 }
5601
5602 return mpol_misplaced(folio, vmf, addr);
5603 }
5604
5605 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5606 unsigned long fault_addr, pte_t *fault_pte,
5607 bool writable)
5608 {
5609 pte_t pte, old_pte;
5610
5611 old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
5612 pte = pte_modify(old_pte, vma->vm_page_prot);
5613 pte = pte_mkyoung(pte);
5614 if (writable)
5615 pte = pte_mkwrite(pte, vma);
5616 ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
5617 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
5618 }
5619
5620 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5621 struct folio *folio, pte_t fault_pte,
5622 bool ignore_writable, bool pte_write_upgrade)
5623 {
5624 int nr = pte_pfn(fault_pte) - folio_pfn(folio);
5625 unsigned long start, end, addr = vmf->address;
5626 unsigned long addr_start = addr - (nr << PAGE_SHIFT);
5627 unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
5628 pte_t *start_ptep;
5629
5630 /* Stay within the VMA and within the page table. */
5631 start = max3(addr_start, pt_start, vma->vm_start);
5632 end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
5633 vma->vm_end);
5634 start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
5635
5636 /* Restore all PTEs' mapping of the large folio */
5637 for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
5638 pte_t ptent = ptep_get(start_ptep);
5639 bool writable = false;
5640
5641 if (!pte_present(ptent) || !pte_protnone(ptent))
5642 continue;
5643
5644 if (pfn_folio(pte_pfn(ptent)) != folio)
5645 continue;
5646
5647 if (!ignore_writable) {
5648 ptent = pte_modify(ptent, vma->vm_page_prot);
5649 writable = pte_write(ptent);
5650 if (!writable && pte_write_upgrade &&
5651 can_change_pte_writable(vma, addr, ptent))
5652 writable = true;
5653 }
5654
5655 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
5656 }
5657 }
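
/*
 * Worked example of the clamping above: for a 16-page folio mapped so
 * that the faulting PTE points at its ninth page, nr = 8 and
 * addr_start lies 8 pages below vmf->address. If the VMA starts only
 * 4 pages below the fault (and no PMD boundary intervenes), start is
 * clamped to vma->vm_start, and only the 12 in-VMA PTEs are scanned
 * and rebuilt.
 */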
5658
5659 static vm_fault_t do_numa_page(struct vm_fault *vmf)
5660 {
5661 struct vm_area_struct *vma = vmf->vma;
5662 struct folio *folio = NULL;
5663 int nid = NUMA_NO_NODE;
5664 bool writable = false, ignore_writable = false;
5665 bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
5666 int last_cpupid;
5667 int target_nid;
5668 pte_t pte, old_pte;
5669 int flags = 0, nr_pages;
5670
5671 /*
5672 * The pte cannot be used safely until we verify, while holding the page
5673 * table lock, that its contents have not changed during fault handling.
5674 */
5675 spin_lock(vmf->ptl);
5676 /* Read the live PTE from the page tables: */
5677 old_pte = ptep_get(vmf->pte);
5678
5679 if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
5680 pte_unmap_unlock(vmf->pte, vmf->ptl);
5681 return 0;
5682 }
5683
5684 pte = pte_modify(old_pte, vma->vm_page_prot);
5685
5686 /*
5687 * Detect now whether the PTE could be writable; this information
5688 * is only valid while holding the PT lock.
5689 */
5690 writable = pte_write(pte);
5691 if (!writable && pte_write_upgrade &&
5692 can_change_pte_writable(vma, vmf->address, pte))
5693 writable = true;
5694
5695 folio = vm_normal_folio(vma, vmf->address, pte);
5696 if (!folio || folio_is_zone_device(folio))
5697 goto out_map;
5698
5699 nid = folio_nid(folio);
5700 nr_pages = folio_nr_pages(folio);
5701
5702 target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
5703 writable, &last_cpupid);
5704 if (target_nid == NUMA_NO_NODE)
5705 goto out_map;
5706 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
5707 flags |= TNF_MIGRATE_FAIL;
5708 goto out_map;
5709 }
5710 /* The folio is isolated and isolation code holds a folio reference. */
5711 pte_unmap_unlock(vmf->pte, vmf->ptl);
5712 writable = false;
5713 ignore_writable = true;
5714
5715 /* Migrate to the requested node */
5716 if (!migrate_misplaced_folio(folio, target_nid)) {
5717 nid = target_nid;
5718 flags |= TNF_MIGRATED;
5719 task_numa_fault(last_cpupid, nid, nr_pages, flags);
5720 return 0;
5721 }
5722
5723 flags |= TNF_MIGRATE_FAIL;
5724 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5725 vmf->address, &vmf->ptl);
5726 if (unlikely(!vmf->pte))
5727 return 0;
5728 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
5729 pte_unmap_unlock(vmf->pte, vmf->ptl);
5730 return 0;
5731 }
5732 out_map:
5733 /*
5734 * Make it present again. Depending on how the arch implements
5735 * non-accessible ptes, some may still allow access by kernel mode.
5736 */
5737 if (folio && folio_test_large(folio))
5738 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
5739 pte_write_upgrade);
5740 else
5741 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
5742 writable);
5743 pte_unmap_unlock(vmf->pte, vmf->ptl);
5744
5745 if (nid != NUMA_NO_NODE)
5746 task_numa_fault(last_cpupid, nid, nr_pages, flags);
5747 return 0;
5748 }
5749
5750 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
5751 {
5752 struct vm_area_struct *vma = vmf->vma;
5753
5754 if (vma_is_anonymous(vma))
5755 return do_huge_pmd_anonymous_page(vmf);
5756 /*
5757 * Currently we just emit PAGE_SIZE for our fault events, so don't allow
5758 * a huge fault if we have a pre-content watch on this file. This would
5759 * be trivial to support, but there would need to be tests to ensure
5760 * this works properly and those don't exist currently.
5761 */
5762 if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5763 return VM_FAULT_FALLBACK;
5764 if (vma->vm_ops->huge_fault)
5765 return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5766 return VM_FAULT_FALLBACK;
5767 }
5768
5769 /* `inline' is required to avoid gcc 4.1.2 build error */
5770 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
5771 {
5772 struct vm_area_struct *vma = vmf->vma;
5773 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5774 vm_fault_t ret;
5775
5776 if (vma_is_anonymous(vma)) {
5777 if (likely(!unshare) &&
5778 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
5779 if (userfaultfd_wp_async(vmf->vma))
5780 goto split;
5781 return handle_userfault(vmf, VM_UFFD_WP);
5782 }
5783 return do_huge_pmd_wp_page(vmf);
5784 }
5785
5786 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5787 /* See comment in create_huge_pmd. */
5788 if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5789 goto split;
5790 if (vma->vm_ops->huge_fault) {
5791 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5792 if (!(ret & VM_FAULT_FALLBACK))
5793 return ret;
5794 }
5795 }
5796
5797 split:
5798 /* COW or write-notify handled on pte level: split pmd. */
5799 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5800
5801 return VM_FAULT_FALLBACK;
5802 }
5803
5804 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
5805 {
5806 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
5807 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5808 struct vm_area_struct *vma = vmf->vma;
5809 /* No support for anonymous transparent PUD pages yet */
5810 if (vma_is_anonymous(vma))
5811 return VM_FAULT_FALLBACK;
5812 /* See comment in create_huge_pmd. */
5813 if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5814 return VM_FAULT_FALLBACK;
5815 if (vma->vm_ops->huge_fault)
5816 return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5817 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5818 return VM_FAULT_FALLBACK;
5819 }
5820
5821 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5822 {
5823 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
5824 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5825 struct vm_area_struct *vma = vmf->vma;
5826 vm_fault_t ret;
5827
5828 /* No support for anonymous transparent PUD pages yet */
5829 if (vma_is_anonymous(vma))
5830 goto split;
5831 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5832 /* See comment in create_huge_pmd. */
5833 if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5834 goto split;
5835 if (vma->vm_ops->huge_fault) {
5836 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5837 if (!(ret & VM_FAULT_FALLBACK))
5838 return ret;
5839 }
5840 }
5841 split:
5842 /* COW or write-notify not handled on PUD level: split pud. */
5843 __split_huge_pud(vma, vmf->pud, vmf->address);
5844 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5845 return VM_FAULT_FALLBACK;
5846 }
5847
5848 /*
5849 * These routines also need to handle stuff like marking pages dirty
5850 * and/or accessed for architectures that don't do it in hardware (most
5851 * RISC architectures). The early dirtying is also good on the i386.
5852 *
5853 * There is also a hook called "update_mmu_cache()" that architectures
5854 * with external mmu caches can use to update those (ie the Sparc or
5855 * PowerPC hashed page tables that act as extended TLBs).
5856 *
5857 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
5858 * concurrent faults).
5859 *
5860 * The mmap_lock may have been released depending on flags and our return value.
5861 * See filemap_fault() and __folio_lock_or_retry().
5862 */
5863 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
5864 {
5865 pte_t entry;
5866
5867 if (unlikely(pmd_none(*vmf->pmd))) {
5868 /*
5869 * Leave __pte_alloc() until later: because vm_ops->fault may
5870 * want to allocate a huge page, and if we expose the page table
5871 * for an instant, it will be difficult to retract it from
5872 * concurrent faults and from rmap lookups.
5873 */
5874 vmf->pte = NULL;
5875 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
5876 } else {
5877 pmd_t dummy_pmdval;
5878
5879 /*
5880 * A regular pmd is established and it can't morph into a huge
5881 * pmd by anon khugepaged, since that takes mmap_lock in write
5882 * mode; but shmem or file collapse to THP could still morph
5883 * it into a huge pmd: just retry later if so.
5884 *
5885 * Use the maywrite version to indicate that vmf->pte may be
5886 * modified, but since we will use pte_same() to detect the
5887 * change of the !pte_none() entry, there is no need to recheck
5888 * the pmdval. Here we choose to pass a dummy variable instead
5889 * of NULL, which helps new users think about why this place is
5890 * special.
5891 */
5892 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
5893 vmf->address, &dummy_pmdval,
5894 &vmf->ptl);
5895 if (unlikely(!vmf->pte))
5896 return 0;
5897 vmf->orig_pte = ptep_get_lockless(vmf->pte);
5898 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
5899
5900 if (pte_none(vmf->orig_pte)) {
5901 pte_unmap(vmf->pte);
5902 vmf->pte = NULL;
5903 }
5904 }
5905
5906 if (!vmf->pte)
5907 return do_pte_missing(vmf);
5908
5909 if (!pte_present(vmf->orig_pte))
5910 return do_swap_page(vmf);
5911
5912 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5913 return do_numa_page(vmf);
5914
5915 spin_lock(vmf->ptl);
5916 entry = vmf->orig_pte;
5917 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
5918 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
5919 goto unlock;
5920 }
5921 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5922 if (!pte_write(entry))
5923 return do_wp_page(vmf);
5924 else if (likely(vmf->flags & FAULT_FLAG_WRITE))
5925 entry = pte_mkdirty(entry);
5926 }
5927 entry = pte_mkyoung(entry);
5928 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5929 vmf->flags & FAULT_FLAG_WRITE)) {
5930 update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5931 vmf->pte, 1);
5932 } else {
5933 /* Skip spurious TLB flush for retried page fault */
5934 if (vmf->flags & FAULT_FLAG_TRIED)
5935 goto unlock;
5936 /*
5937 * This is needed only for protection faults but the arch code
5938 * is not yet telling us if this is a protection fault or not.
5939 * This still avoids useless tlb flushes for .text page faults
5940 * with threads.
5941 */
5942 if (vmf->flags & FAULT_FLAG_WRITE)
5943 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5944 vmf->pte);
5945 }
5946 unlock:
5947 pte_unmap_unlock(vmf->pte, vmf->ptl);
5948 return 0;
5949 }
5950
5951 /*
5952 * On entry, we hold either the VMA lock or the mmap_lock
5953 * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
5954 * the result, the mmap_lock is not held on exit. See filemap_fault()
5955 * and __folio_lock_or_retry().
5956 */
5957 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5958 unsigned long address, unsigned int flags)
5959 {
5960 struct vm_fault vmf = {
5961 .vma = vma,
5962 .address = address & PAGE_MASK,
5963 .real_address = address,
5964 .flags = flags,
5965 .pgoff = linear_page_index(vma, address),
5966 .gfp_mask = __get_fault_gfp_mask(vma),
5967 };
5968 struct mm_struct *mm = vma->vm_mm;
5969 unsigned long vm_flags = vma->vm_flags;
5970 pgd_t *pgd;
5971 p4d_t *p4d;
5972 vm_fault_t ret;
5973
5974 pgd = pgd_offset(mm, address);
5975 p4d = p4d_alloc(mm, pgd, address);
5976 if (!p4d)
5977 return VM_FAULT_OOM;
5978
5979 vmf.pud = pud_alloc(mm, p4d, address);
5980 if (!vmf.pud)
5981 return VM_FAULT_OOM;
5982 retry_pud:
5983 if (pud_none(*vmf.pud) &&
5984 thp_vma_allowable_order(vma, vm_flags,
5985 TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
5986 ret = create_huge_pud(&vmf);
5987 if (!(ret & VM_FAULT_FALLBACK))
5988 return ret;
5989 } else {
5990 pud_t orig_pud = *vmf.pud;
5991
5992 barrier();
5993 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5994
5995 /*
5996 * TODO once we support anonymous PUDs: NUMA case and
5997 * FAULT_FLAG_UNSHARE handling.
5998 */
5999 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
6000 ret = wp_huge_pud(&vmf, orig_pud);
6001 if (!(ret & VM_FAULT_FALLBACK))
6002 return ret;
6003 } else {
6004 huge_pud_set_accessed(&vmf, orig_pud);
6005 return 0;
6006 }
6007 }
6008 }
6009
6010 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
6011 if (!vmf.pmd)
6012 return VM_FAULT_OOM;
6013
6014 /* Huge pud page fault raced with pmd_alloc? */
6015 if (pud_trans_unstable(vmf.pud))
6016 goto retry_pud;
6017
6018 if (pmd_none(*vmf.pmd) &&
6019 thp_vma_allowable_order(vma, vm_flags,
6020 TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
6021 ret = create_huge_pmd(&vmf);
6022 if (!(ret & VM_FAULT_FALLBACK))
6023 return ret;
6024 } else {
6025 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
6026
6027 if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
6028 VM_BUG_ON(thp_migration_supported() &&
6029 !is_pmd_migration_entry(vmf.orig_pmd));
6030 if (is_pmd_migration_entry(vmf.orig_pmd))
6031 pmd_migration_entry_wait(mm, vmf.pmd);
6032 return 0;
6033 }
6034 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
6035 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
6036 return do_huge_pmd_numa_page(&vmf);
6037
6038 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6039 !pmd_write(vmf.orig_pmd)) {
6040 ret = wp_huge_pmd(&vmf);
6041 if (!(ret & VM_FAULT_FALLBACK))
6042 return ret;
6043 } else {
6044 huge_pmd_set_accessed(&vmf);
6045 return 0;
6046 }
6047 }
6048 }
6049
6050 return handle_pte_fault(&vmf);
6051 }
6052
6053 /**
6054 * mm_account_fault - Do page fault accounting
6055 * @mm: mm from which memcg should be extracted. It can be NULL.
6056 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting
6057 * of perf event counters, but we'll still do the per-task accounting to
6058 * the task that triggered this page fault.
6059 * @address: the faulted address.
6060 * @flags: the fault flags.
6061 * @ret: the fault retcode.
6062 *
6063 * This will take care of most of the page fault accounting. Meanwhile, it
6064 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
6065 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
6066 * still be in per-arch page fault handlers at the entry of page fault.
6067 */
6068 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
6069 unsigned long address, unsigned int flags,
6070 vm_fault_t ret)
6071 {
6072 bool major;
6073
6074 /* Incomplete faults will be accounted upon completion. */
6075 if (ret & VM_FAULT_RETRY)
6076 return;
6077
6078 /*
6079 * To preserve the behavior of older kernels, PGFAULT counters record
6080 * both successful and failed faults, as opposed to perf counters,
6081 * which ignore failed cases.
6082 */
6083 count_vm_event(PGFAULT);
6084 count_memcg_event_mm(mm, PGFAULT);
6085
6086 /*
6087 * Do not account for unsuccessful faults (e.g. when the address wasn't
6088 * valid). That includes arch_vma_access_permitted() failing before
6089 * reaching here. So this is not a "this many hardware page faults"
6090 * counter. We should use the hw profiling for that.
6091 */
6092 if (ret & VM_FAULT_ERROR)
6093 return;
6094
6095 /*
6096 * We define the fault as a major fault when the final successful fault
6097 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
6098 * handle it immediately previously).
6099 */
6100 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
6101
6102 if (major)
6103 current->maj_flt++;
6104 else
6105 current->min_flt++;
6106
6107 /*
6108 * If the fault is done for GUP, regs will be NULL. We only do
6109 * the per-thread fault accounting for the task that triggered
6110 * the fault, and we skip the perf event updates.
6111 */
6112 if (!regs)
6113 return;
6114
6115 if (major)
6116 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
6117 else
6118 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
6119 }
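
/*
 * Example: the per-task and perf counters updated above are visible
 * from userspace, e.g. (event names as listed by perf-list(1)):
 *
 *	perf stat -e minor-faults,major-faults ./some_workload
 *
 * and through the minflt/majflt fields of /proc/<pid>/stat.
 */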
6120
6121 #ifdef CONFIG_LRU_GEN
6122 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6123 {
6124 /* the LRU algorithm only applies to accesses with recency */
6125 current->in_lru_fault = vma_has_recency(vma);
6126 }
6127
6128 static void lru_gen_exit_fault(void)
6129 {
6130 current->in_lru_fault = false;
6131 }
6132 #else
6133 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6134 {
6135 }
6136
6137 static void lru_gen_exit_fault(void)
6138 {
6139 }
6140 #endif /* CONFIG_LRU_GEN */
6141
6142 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
6143 unsigned int *flags)
6144 {
6145 if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
6146 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
6147 return VM_FAULT_SIGSEGV;
6148 /*
6149 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
6150 * just treat it like an ordinary read-fault otherwise.
6151 */
6152 if (!is_cow_mapping(vma->vm_flags))
6153 *flags &= ~FAULT_FLAG_UNSHARE;
6154 } else if (*flags & FAULT_FLAG_WRITE) {
6155 /* Write faults on read-only mappings are impossible ... */
6156 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
6157 return VM_FAULT_SIGSEGV;
6158 /* ... and FOLL_FORCE only applies to COW mappings. */
6159 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
6160 !is_cow_mapping(vma->vm_flags)))
6161 return VM_FAULT_SIGSEGV;
6162 }
6163 #ifdef CONFIG_PER_VMA_LOCK
6164 /*
6165 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
6166 * the assumption that lock is dropped on VM_FAULT_RETRY.
6167 */
6168 if (WARN_ON_ONCE((*flags &
6169 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
6170 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
6171 return VM_FAULT_SIGSEGV;
6172 #endif
6173
6174 return 0;
6175 }
6176
6177 /*
6178 * By the time we get here, we already hold either the VMA lock or the
6179 * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
6180 *
6181 * The mmap_lock may have been released depending on flags and our
6182 * return value. See filemap_fault() and __folio_lock_or_retry().
6183 */
6184 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6185 unsigned int flags, struct pt_regs *regs)
6186 {
6187 /* If the fault handler drops the mmap_lock, vma may be freed */
6188 struct mm_struct *mm = vma->vm_mm;
6189 vm_fault_t ret;
6190 bool is_droppable;
6191
6192 __set_current_state(TASK_RUNNING);
6193
6194 ret = sanitize_fault_flags(vma, &flags);
6195 if (ret)
6196 goto out;
6197
6198 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6199 flags & FAULT_FLAG_INSTRUCTION,
6200 flags & FAULT_FLAG_REMOTE)) {
6201 ret = VM_FAULT_SIGSEGV;
6202 goto out;
6203 }
6204
6205 is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
6206
6207 /*
6208 * Enable the memcg OOM handling for faults triggered in user
6209 * space. Kernel faults are handled more gracefully.
6210 */
6211 if (flags & FAULT_FLAG_USER)
6212 mem_cgroup_enter_user_fault();
6213
6214 lru_gen_enter_fault(vma);
6215
6216 if (unlikely(is_vm_hugetlb_page(vma)))
6217 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6218 else
6219 ret = __handle_mm_fault(vma, address, flags);
6220
6221 /*
6222 * Warning: It is no longer safe to dereference vma-> after this point,
6223 * because mmap_lock might have been dropped by __handle_mm_fault(), so
6224 * vma might be destroyed from underneath us.
6225 */
6226
6227 lru_gen_exit_fault();
6228
6229 /* If the mapping is droppable, then errors due to OOM aren't fatal. */
6230 if (is_droppable)
6231 ret &= ~VM_FAULT_OOM;
6232
6233 if (flags & FAULT_FLAG_USER) {
6234 mem_cgroup_exit_user_fault();
6235 /*
6236 * The task may have entered a memcg OOM situation but
6237 * if the allocation error was handled gracefully (no
6238 * VM_FAULT_OOM), there is no need to kill anything.
6239 * Just clean up the OOM state peacefully.
6240 */
6241 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6242 mem_cgroup_oom_synchronize(false);
6243 }
6244 out:
6245 mm_account_fault(mm, regs, address, flags, ret);
6246
6247 return ret;
6248 }
6249 EXPORT_SYMBOL_GPL(handle_mm_fault);
6250
6251 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA
6252 #include <linux/extable.h>
6253
6254 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6255 {
6256 if (likely(mmap_read_trylock(mm)))
6257 return true;
6258
6259 if (regs && !user_mode(regs)) {
6260 unsigned long ip = exception_ip(regs);
6261 if (!search_exception_tables(ip))
6262 return false;
6263 }
6264
6265 return !mmap_read_lock_killable(mm);
6266 }
6267
6268 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
6269 {
6270 /*
6271 * We don't have this operation yet.
6272 *
6273 * It should be easy enough to do: it's basically a
6274 * atomic_long_try_cmpxchg_acquire()
6275 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
6276 * it also needs the proper lockdep magic etc.
6277 */
6278 return false;
6279 }
6280
6281 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6282 {
6283 mmap_read_unlock(mm);
6284 if (regs && !user_mode(regs)) {
6285 unsigned long ip = exception_ip(regs);
6286 if (!search_exception_tables(ip))
6287 return false;
6288 }
6289 return !mmap_write_lock_killable(mm);
6290 }
6291
6292 /*
6293 * Helper for page fault handling.
6294 *
6295 * This is kind of equivalent to "mmap_read_lock()" followed
6296 * by "find_extend_vma()", except it's a lot more careful about
6297 * the locking (and will drop the lock on failure).
6298 *
6299 * For example, if we have a kernel bug that causes a page
6300 * fault, we don't want to just use mmap_read_lock() to get
6301 * the mm lock, because that would deadlock if the bug were
6302 * to happen while we're holding the mm lock for writing.
6303 *
6304 * So this checks the exception tables on kernel faults in
6305 * order to only do this all for instructions that are actually
6306 * expected to fault.
6307 *
6308 * We can also actually take the mm lock for writing if we
6309 * need to extend the vma, which helps the VM layer a lot.
6310 */
6311 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
6312 unsigned long addr, struct pt_regs *regs)
6313 {
6314 struct vm_area_struct *vma;
6315
6316 if (!get_mmap_lock_carefully(mm, regs))
6317 return NULL;
6318
6319 vma = find_vma(mm, addr);
6320 if (likely(vma && (vma->vm_start <= addr)))
6321 return vma;
6322
6323 /*
6324 * Well, dang. We might still be successful, but only
6325 * if we can extend a vma to do so.
6326 */
6327 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
6328 mmap_read_unlock(mm);
6329 return NULL;
6330 }
6331
6332 /*
6333 * We can try to upgrade the mmap lock atomically,
6334 * in which case we can continue to use the vma
6335 * we already looked up.
6336 *
6337 * Otherwise we'll have to drop the mmap lock and
6338 * re-take it, and also look up the vma again,
6339 * re-checking it.
6340 */
6341 if (!mmap_upgrade_trylock(mm)) {
6342 if (!upgrade_mmap_lock_carefully(mm, regs))
6343 return NULL;
6344
6345 vma = find_vma(mm, addr);
6346 if (!vma)
6347 goto fail;
6348 if (vma->vm_start <= addr)
6349 goto success;
6350 if (!(vma->vm_flags & VM_GROWSDOWN))
6351 goto fail;
6352 }
6353
6354 if (expand_stack_locked(vma, addr))
6355 goto fail;
6356
6357 success:
6358 mmap_write_downgrade(mm);
6359 return vma;
6360
6361 fail:
6362 mmap_write_unlock(mm);
6363 return NULL;
6364 }
6365 #endif
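
/*
 * Illustrative sketch (simplified; real arch handlers differ in
 * detail) of how a page fault handler typically uses the helper
 * above together with handle_mm_fault():
 *
 *	vma = lock_mm_and_find_vma(mm, address, regs);
 *	if (!vma)
 *		return;			(bad address; no lock is held)
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_RETRY) {
 *		flags |= FAULT_FLAG_TRIED;
 *		goto retry;		(mmap_lock was already dropped)
 *	}
 *	mmap_read_unlock(mm);
 */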
6366
6367 #ifdef CONFIG_PER_VMA_LOCK
6368 /*
6369 * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
6370 * stable and not isolated. If the VMA is not found or is being modified, the
6371 * function returns NULL.
6372 */
6373 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
6374 unsigned long address)
6375 {
6376 MA_STATE(mas, &mm->mm_mt, address, address);
6377 struct vm_area_struct *vma;
6378
6379 rcu_read_lock();
6380 retry:
6381 vma = mas_walk(&mas);
6382 if (!vma)
6383 goto inval;
6384
6385 if (!vma_start_read(vma))
6386 goto inval;
6387
6388 /* Check if the VMA got isolated after we found it */
6389 if (vma->detached) {
6390 vma_end_read(vma);
6391 count_vm_vma_lock_event(VMA_LOCK_MISS);
6392 /* The area was replaced with another one */
6393 goto retry;
6394 }
6395 /*
6396 * At this point, we have a stable reference to a VMA: The VMA is
6397 * locked and we know it hasn't already been isolated.
6398 * From here on, we can access the VMA without worrying about which
6399 * fields are accessible for RCU readers.
6400 */
6401
6402 /* Check since vm_start/vm_end might change before we lock the VMA */
6403 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6404 goto inval_end_read;
6405
6406 rcu_read_unlock();
6407 return vma;
6408
6409 inval_end_read:
6410 vma_end_read(vma);
6411 inval:
6412 rcu_read_unlock();
6413 count_vm_vma_lock_event(VMA_LOCK_ABORT);
6414 return NULL;
6415 }
6416 #endif /* CONFIG_PER_VMA_LOCK */
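
/*
 * Illustrative sketch of the lockless fast path built on the helper
 * above (simplified from typical arch fault handlers):
 *
 *	vma = lock_vma_under_rcu(mm, address);
 *	if (vma) {
 *		fault = handle_mm_fault(vma, address,
 *					flags | FAULT_FLAG_VMA_LOCK, regs);
 *		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
 *			vma_end_read(vma);
 *	}
 *	(on NULL or VM_FAULT_RETRY, fall back to the mmap_lock path)
 */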
6417
6418 #ifndef __PAGETABLE_P4D_FOLDED
6419 /*
6420 * Allocate p4d page table.
6421 * We've already handled the fast-path in-line.
6422 */
6423 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6424 {
6425 p4d_t *new = p4d_alloc_one(mm, address);
6426 if (!new)
6427 return -ENOMEM;
6428
6429 spin_lock(&mm->page_table_lock);
6430 if (pgd_present(*pgd)) { /* Another has populated it */
6431 p4d_free(mm, new);
6432 } else {
6433 smp_wmb(); /* See comment in pmd_install() */
6434 pgd_populate(mm, pgd, new);
6435 }
6436 spin_unlock(&mm->page_table_lock);
6437 return 0;
6438 }
6439 #endif /* __PAGETABLE_P4D_FOLDED */
6440
6441 #ifndef __PAGETABLE_PUD_FOLDED
6442 /*
6443 * Allocate page upper directory.
6444 * We've already handled the fast-path in-line.
6445 */
6446 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6447 {
6448 pud_t *new = pud_alloc_one(mm, address);
6449 if (!new)
6450 return -ENOMEM;
6451
6452 spin_lock(&mm->page_table_lock);
6453 if (!p4d_present(*p4d)) {
6454 mm_inc_nr_puds(mm);
6455 smp_wmb(); /* See comment in pmd_install() */
6456 p4d_populate(mm, p4d, new);
6457 } else /* Another has populated it */
6458 pud_free(mm, new);
6459 spin_unlock(&mm->page_table_lock);
6460 return 0;
6461 }
6462 #endif /* __PAGETABLE_PUD_FOLDED */
6463
6464 #ifndef __PAGETABLE_PMD_FOLDED
6465 /*
6466 * Allocate page middle directory.
6467 * We've already handled the fast-path in-line.
6468 */
6469 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6470 {
6471 spinlock_t *ptl;
6472 pmd_t *new = pmd_alloc_one(mm, address);
6473 if (!new)
6474 return -ENOMEM;
6475
6476 ptl = pud_lock(mm, pud);
6477 if (!pud_present(*pud)) {
6478 mm_inc_nr_pmds(mm);
6479 smp_wmb(); /* See comment in pmd_install() */
6480 pud_populate(mm, pud, new);
6481 } else { /* Another has populated it */
6482 pmd_free(mm, new);
6483 }
6484 spin_unlock(ptl);
6485 return 0;
6486 }
6487 #endif /* __PAGETABLE_PMD_FOLDED */
6488
6489 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
6490 spinlock_t *lock, pte_t *ptep,
6491 pgprot_t pgprot, unsigned long pfn_base,
6492 unsigned long addr_mask, bool writable,
6493 bool special)
6494 {
6495 args->lock = lock;
6496 args->ptep = ptep;
6497 args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
6498 args->pgprot = pgprot;
6499 args->writable = writable;
6500 args->special = special;
6501 }
6502
6503 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
6504 {
6505 #ifdef CONFIG_LOCKDEP
6506 struct file *file = vma->vm_file;
6507 struct address_space *mapping = file ? file->f_mapping : NULL;
6508
6509 if (mapping)
6510 lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
6511 lockdep_is_held(&vma->vm_mm->mmap_lock));
6512 else
6513 lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
6514 #endif
6515 }
6516
6517 /**
6518 * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
6519 * @args: Pointer to struct @follow_pfnmap_args
6520 *
6521 * The caller needs to set up args->vma and args->address to point to the
6522 * virtual address as the target of such lookup. On a successful return,
6523 * the results will be put into other output fields.
6524 *
6525 * After the caller has finished using the fields, it must invoke
6526 * follow_pfnmap_end() to properly release the locks and resources
6527 * of such a lookup request.
6528 *
6529 * During the start() and end() calls, the results in @args will be valid
6530 * as proper locks will be held. After end() is called, all the fields
6531 * in @follow_pfnmap_args become invalid and must not be accessed further.
6532 * Any use of such information after end() requires proper synchronization
6533 * by the caller with page table updates, otherwise it can create a
6534 * security bug.
6535 *
6536 * If the PTE maps a refcounted page, callers are responsible to protect
6537 * against invalidation with MMU notifiers; otherwise access to the PFN at
6538 * a later point in time can trigger use-after-free.
6539 *
6540 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
6541 * should be taken for read, and the mmap semaphore cannot be released
6542 * before the end() is invoked.
6543 *
6544 * This function must not be used to modify PTE content.
6545 *
6546 * Return: zero on success, negative otherwise.
6547 */
6548 int follow_pfnmap_start(struct follow_pfnmap_args *args)
6549 {
6550 struct vm_area_struct *vma = args->vma;
6551 unsigned long address = args->address;
6552 struct mm_struct *mm = vma->vm_mm;
6553 spinlock_t *lock;
6554 pgd_t *pgdp;
6555 p4d_t *p4dp, p4d;
6556 pud_t *pudp, pud;
6557 pmd_t *pmdp, pmd;
6558 pte_t *ptep, pte;
6559
6560 pfnmap_lockdep_assert(vma);
6561
6562 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6563 goto out;
6564
6565 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6566 goto out;
6567 retry:
6568 pgdp = pgd_offset(mm, address);
6569 if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
6570 goto out;
6571
6572 p4dp = p4d_offset(pgdp, address);
6573 p4d = READ_ONCE(*p4dp);
6574 if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
6575 goto out;
6576
6577 pudp = pud_offset(p4dp, address);
6578 pud = READ_ONCE(*pudp);
6579 if (pud_none(pud))
6580 goto out;
6581 if (pud_leaf(pud)) {
6582 lock = pud_lock(mm, pudp);
6583 if (!unlikely(pud_leaf(pud))) {
6584 spin_unlock(lock);
6585 goto retry;
6586 }
6587 pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
6588 pud_pfn(pud), PUD_MASK, pud_write(pud),
6589 pud_special(pud));
6590 return 0;
6591 }
6592
6593 pmdp = pmd_offset(pudp, address);
6594 pmd = pmdp_get_lockless(pmdp);
6595 if (pmd_leaf(pmd)) {
6596 lock = pmd_lock(mm, pmdp);
6597 if (!unlikely(pmd_leaf(pmd))) {
6598 spin_unlock(lock);
6599 goto retry;
6600 }
6601 pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
6602 pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
6603 pmd_special(pmd));
6604 return 0;
6605 }
6606
6607 ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
6608 if (!ptep)
6609 goto out;
6610 pte = ptep_get(ptep);
6611 if (!pte_present(pte))
6612 goto unlock;
6613 pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
6614 pte_pfn(pte), PAGE_MASK, pte_write(pte),
6615 pte_special(pte));
6616 return 0;
6617 unlock:
6618 pte_unmap_unlock(ptep, lock);
6619 out:
6620 return -EINVAL;
6621 }
6622 EXPORT_SYMBOL_GPL(follow_pfnmap_start);
6623
6624 /**
6625 * follow_pfnmap_end(): End a follow_pfnmap_start() process
6626 * @args: Pointer to struct @follow_pfnmap_args
6627 *
6628 * Must be used in pair with follow_pfnmap_start(). See the start() function
6629 * above for more information.
6630 */
6631 void follow_pfnmap_end(struct follow_pfnmap_args *args)
6632 {
6633 if (args->lock)
6634 spin_unlock(args->lock);
6635 if (args->ptep)
6636 pte_unmap(args->ptep);
6637 }
6638 EXPORT_SYMBOL_GPL(follow_pfnmap_end);
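
/*
 * Illustrative sketch of the start()/end() pairing; see
 * generic_access_phys() below for a complete in-tree user:
 *
 *	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
 *
 *	if (follow_pfnmap_start(&args))
 *		return -EINVAL;
 *	(args.pfn, args.pgprot and args.writable are valid here)
 *	follow_pfnmap_end(&args);
 *	(the fields must not be used after this point)
 */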
6639
6640 #ifdef CONFIG_HAVE_IOREMAP_PROT
6641 /**
6642 * generic_access_phys - generic implementation for iomem mmap access
6643 * @vma: the vma to access
6644 * @addr: userspace address, not relative offset within @vma
6645 * @buf: buffer to read/write
6646 * @len: length of transfer
6647 * @write: set to FOLL_WRITE when writing, otherwise reading
6648 *
6649 * This is a generic implementation for &vm_operations_struct.access for an
6650 * iomem mapping. This callback is used by access_process_vm() when the @vma is
6651 * not page based.
6652 */
6653 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6654 void *buf, int len, int write)
6655 {
6656 resource_size_t phys_addr;
6657 unsigned long prot = 0;
6658 void __iomem *maddr;
6659 int offset = offset_in_page(addr);
6660 int ret = -EINVAL;
6661 bool writable;
6662 struct follow_pfnmap_args args = { .vma = vma, .address = addr };
6663
6664 retry:
6665 if (follow_pfnmap_start(&args))
6666 return -EINVAL;
6667 prot = pgprot_val(args.pgprot);
6668 phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
6669 writable = args.writable;
6670 follow_pfnmap_end(&args);
6671
6672 if ((write & FOLL_WRITE) && !writable)
6673 return -EINVAL;
6674
6675 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6676 if (!maddr)
6677 return -ENOMEM;
6678
6679 if (follow_pfnmap_start(&args))
6680 goto out_unmap;
6681
6682 if ((prot != pgprot_val(args.pgprot)) ||
6683 (phys_addr != (args.pfn << PAGE_SHIFT)) ||
6684 (writable != args.writable)) {
6685 follow_pfnmap_end(&args);
6686 iounmap(maddr);
6687 goto retry;
6688 }
6689
6690 if (write)
6691 memcpy_toio(maddr + offset, buf, len);
6692 else
6693 memcpy_fromio(buf, maddr + offset, len);
6694 ret = len;
6695 follow_pfnmap_end(&args);
6696 out_unmap:
6697 iounmap(maddr);
6698
6699 return ret;
6700 }
6701 EXPORT_SYMBOL_GPL(generic_access_phys);
6702 #endif
6703
6704 /*
6705 * Access another process' address space as given in mm.
6706 */
6707 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
6708 void *buf, int len, unsigned int gup_flags)
6709 {
6710 void *old_buf = buf;
6711 int write = gup_flags & FOLL_WRITE;
6712
6713 if (mmap_read_lock_killable(mm))
6714 return 0;
6715
6716 /* Untag the address before looking up the VMA */
6717 addr = untagged_addr_remote(mm, addr);
6718
6719 /* Avoid triggering the temporary warning in __get_user_pages */
6720 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
6721 return 0;
6722
6723 /* ignore errors, just check how much was successfully transferred */
6724 while (len) {
6725 int bytes, offset;
6726 void *maddr;
6727 struct vm_area_struct *vma = NULL;
6728 struct page *page = get_user_page_vma_remote(mm, addr,
6729 gup_flags, &vma);
6730
6731 if (IS_ERR(page)) {
6732 /* We might need to expand the stack to access it */
6733 vma = vma_lookup(mm, addr);
6734 if (!vma) {
6735 vma = expand_stack(mm, addr);
6736
6737 /* mmap_lock was dropped on failure */
6738 if (!vma)
6739 return buf - old_buf;
6740
6741 /* Try again if stack expansion worked */
6742 continue;
6743 }
6744
6745 /*
6746 * Check if this is a VM_IO | VM_PFNMAP VMA, which
6747 * we can access using slightly different code.
6748 */
6749 bytes = 0;
6750 #ifdef CONFIG_HAVE_IOREMAP_PROT
6751 if (vma->vm_ops && vma->vm_ops->access)
6752 bytes = vma->vm_ops->access(vma, addr, buf,
6753 len, write);
6754 #endif
6755 if (bytes <= 0)
6756 break;
6757 } else {
6758 bytes = len;
6759 offset = addr & (PAGE_SIZE-1);
6760 if (bytes > PAGE_SIZE-offset)
6761 bytes = PAGE_SIZE-offset;
6762
6763 maddr = kmap_local_page(page);
6764 if (write) {
6765 copy_to_user_page(vma, page, addr,
6766 maddr + offset, buf, bytes);
6767 set_page_dirty_lock(page);
6768 } else {
6769 copy_from_user_page(vma, page, addr,
6770 buf, maddr + offset, bytes);
6771 }
6772 unmap_and_put_page(page, maddr);
6773 }
6774 len -= bytes;
6775 buf += bytes;
6776 addr += bytes;
6777 }
6778 mmap_read_unlock(mm);
6779
6780 return buf - old_buf;
6781 }

/**
 * access_remote_vm - access another process' address space
 * @mm: the mm_struct of the target address space
 * @addr: start address to access
 * @buf: source or destination buffer
 * @len: number of bytes to transfer
 * @gup_flags: flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 *
 * Return: number of bytes copied from source to destination.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		      void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);

	mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);
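
/*
 * Illustrative sketch, example only: how a ptrace-style caller might read
 * one word from another task using the export above. FOLL_FORCE mirrors
 * what ptrace passes so that read-only text pages remain readable; the
 * helper name is hypothetical.
 */
#if 0	/* example only */
static int example_peek_word(struct task_struct *tsk, unsigned long addr,
			     unsigned long *val)
{
	int copied = access_process_vm(tsk, addr, val, sizeof(*val),
				       FOLL_FORCE);

	return copied == sizeof(*val) ? 0 : -EIO;
}
#endif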

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * we might be running from an atomic context so we cannot sleep
	 */
	if (!mmap_read_trylock(mm))
		return;

	vma = vma_lookup(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		ip -= vma->vm_start;
		ip += vma->vm_pgoff << PAGE_SHIFT;
		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
				vma->vm_start,
				vma->vm_end - vma->vm_start);
	}
	mmap_read_unlock(mm);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	if (pagefault_disabled())
		return;
	__might_sleep(file, line);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif
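
/*
 * Note (example only): uaccess helpers such as copy_from_user() call
 * might_fault(), which lands in __might_fault() above. Copying from
 * userspace while holding a spinlock will therefore splat under
 * CONFIG_DEBUG_ATOMIC_SLEEP unless the caller has wrapped the access in
 * pagefault_disable()/pagefault_enable().
 */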

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation. The target subpage will be processed last to keep its
 * cache lines hot.
 */
static inline int process_huge_page(
	unsigned long addr_hint, unsigned int nr_pages,
	int (*process_subpage)(unsigned long addr, int idx, void *arg),
	void *arg)
{
	int i, n, base, l, ret;
	unsigned long addr = addr_hint &
		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);

	/* Process target subpage last to keep its cache lines hot */
	might_sleep();
	n = (addr_hint - addr) / PAGE_SIZE;
	if (2 * n <= nr_pages) {
		/* If target subpage is in the first half of the huge page */
		base = 0;
		l = n;
		/* Process subpages at the end of the huge page */
		for (i = nr_pages - 1; i >= 2 * n; i--) {
			cond_resched();
			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
			if (ret)
				return ret;
		}
	} else {
		/* If target subpage is in the second half of the huge page */
		base = nr_pages - 2 * (nr_pages - n);
		l = nr_pages - n;
		/* Process subpages at the beginning of the huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
			if (ret)
				return ret;
		}
	}
	/*
	 * Process the remaining subpages in a left-right-left-right pattern
	 * towards the target subpage.
	 */
	for (i = 0; i < l; i++) {
		int left_idx = base + i;
		int right_idx = base + 2 * l - 1 - i;

		cond_resched();
		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
		if (ret)
			return ret;
		cond_resched();
		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
		if (ret)
			return ret;
	}
	return 0;
}
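
/*
 * Worked example for the ordering above: with nr_pages == 8 and a hint
 * on subpage 2, n = 2 and 2 * n <= nr_pages, so subpages 7, 6, 5, 4 are
 * processed first, then the (left, right) pairs (0, 3) and (1, 2). The
 * full order is 7, 6, 5, 4, 0, 3, 1, 2: the target subpage is written
 * last, so its cache lines are hottest when the fault handler returns.
 */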

static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
				unsigned int nr_pages)
{
	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
	int i;

	might_sleep();
	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
	}
}

static int clear_subpage(unsigned long addr, int idx, void *arg)
{
	struct folio *folio = arg;

	clear_user_highpage(folio_page(folio, idx), addr);
	return 0;
}

/**
 * folio_zero_user - Zero a folio which will be mapped to userspace.
 * @folio: The folio to zero.
 * @addr_hint: The address at which the folio will be accessed, or the
 *	base address if unclear.
 */
void folio_zero_user(struct folio *folio, unsigned long addr_hint)
{
	unsigned int nr_pages = folio_nr_pages(folio);

	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
		clear_gigantic_page(folio, addr_hint, nr_pages);
	else
		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
}
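
/*
 * Illustrative sketch, example only: a THP-style allocation path would
 * zero a freshly allocated folio while steering the cache-hot ordering
 * toward the faulting address. The helper is hypothetical and the
 * vma_alloc_folio() call assumes its current four-argument form.
 */
#if 0	/* example only */
static struct folio *example_alloc_zeroed_pmd_folio(struct vm_area_struct *vma,
						    unsigned long fault_addr)
{
	struct folio *folio = vma_alloc_folio(GFP_TRANSHUGE, HPAGE_PMD_ORDER,
					      vma, fault_addr & HPAGE_PMD_MASK);

	if (folio)
		folio_zero_user(folio, fault_addr);
	return folio;
}
#endif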

static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
				   unsigned long addr_hint,
				   struct vm_area_struct *vma,
				   unsigned int nr_pages)
{
	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
	struct page *dst_page;
	struct page *src_page;
	int i;

	for (i = 0; i < nr_pages; i++) {
		dst_page = folio_page(dst, i);
		src_page = folio_page(src, i);

		cond_resched();
		if (copy_mc_user_highpage(dst_page, src_page,
					  addr + i*PAGE_SIZE, vma))
			return -EHWPOISON;
	}
	return 0;
}

struct copy_subpage_arg {
	struct folio *dst;
	struct folio *src;
	struct vm_area_struct *vma;
};

static int copy_subpage(unsigned long addr, int idx, void *arg)
{
	struct copy_subpage_arg *copy_arg = arg;
	struct page *dst = folio_page(copy_arg->dst, idx);
	struct page *src = folio_page(copy_arg->src, idx);

	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
		return -EHWPOISON;
	return 0;
}

int copy_user_large_folio(struct folio *dst, struct folio *src,
			  unsigned long addr_hint, struct vm_area_struct *vma)
{
	unsigned int nr_pages = folio_nr_pages(dst);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);

	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
}
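/*
 * Returns the number of bytes that could NOT be copied, mirroring the
 * copy_from_user() convention: 0 on complete success, up to the full
 * folio size if the very first byte already faulted.
 */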
long copy_folio_from_user(struct folio *dst_folio,
			  const void __user *usr_src,
			  bool allow_pagefault)
{
	void *kaddr;
	unsigned long i, rc = 0;
	unsigned int nr_pages = folio_nr_pages(dst_folio);
	unsigned long ret_val = nr_pages * PAGE_SIZE;
	struct page *subpage;

	for (i = 0; i < nr_pages; i++) {
		subpage = folio_page(dst_folio, i);
		kaddr = kmap_local_page(subpage);
		if (!allow_pagefault)
			pagefault_disable();
		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
		if (!allow_pagefault)
			pagefault_enable();
		kunmap_local(kaddr);

		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		flush_dcache_page(subpage);

		cond_resched();
	}
	return ret_val;
}
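
/*
 * Usage note (example only): userfaultfd's UFFDIO_COPY path first calls
 * this with allow_pagefault == false while mmap_lock is held, and only
 * retries with faults allowed after dropping the lock, so that a slow
 * or missing source page never sleeps under the lock.
 */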
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS

static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct ptdesc *ptdesc)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	ptdesc->ptl = ptl;
	return true;
}

void ptlock_free(struct ptdesc *ptdesc)
{
	if (ptdesc->ptl)
		kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
}
#endif
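
/*
 * Usage note (example only): with split PTE locks enabled, the spinlock
 * allocated above is reached via pte_lockptr(); most walkers use the
 * pte_offset_map_lock()/pte_unmap_unlock() pair rather than touching the
 * ptdesc directly, e.g.:
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (pte) {
 *		... inspect or modify *pte under ptl ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */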

void vma_pgtable_walk_begin(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_lock_read(vma);
}

void vma_pgtable_walk_end(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_unlock_read(vma);
}

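/*
 * Illustrative sketch, example only: callers bracket a page-table walk
 * with the pair above so that hugetlb VMAs are walked under the hugetlb
 * vma lock; for all other VMAs both calls are no-ops. The walker name
 * is hypothetical.
 */
#if 0	/* example only */
static void example_walk_one_vma(struct vm_area_struct *vma,
				 unsigned long addr)
{
	vma_pgtable_walk_begin(vma);
	/* ... walk pgd/p4d/pud/pmd/pte for addr here ... */
	vma_pgtable_walk_end(vma);
}
#endif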