xref: /linux/mm/memory.c (revision eb88e6bfbc0a975e08a18c39d1138d3e6cdc00a5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/numa_balancing.h>
47 #include <linux/sched/task.h>
48 #include <linux/hugetlb.h>
49 #include <linux/mman.h>
50 #include <linux/swap.h>
51 #include <linux/highmem.h>
52 #include <linux/pagemap.h>
53 #include <linux/memremap.h>
54 #include <linux/kmsan.h>
55 #include <linux/ksm.h>
56 #include <linux/rmap.h>
57 #include <linux/export.h>
58 #include <linux/delayacct.h>
59 #include <linux/init.h>
60 #include <linux/pfn_t.h>
61 #include <linux/writeback.h>
62 #include <linux/memcontrol.h>
63 #include <linux/mmu_notifier.h>
64 #include <linux/swapops.h>
65 #include <linux/elf.h>
66 #include <linux/gfp.h>
67 #include <linux/migrate.h>
68 #include <linux/string.h>
69 #include <linux/memory-tiers.h>
70 #include <linux/debugfs.h>
71 #include <linux/userfaultfd_k.h>
72 #include <linux/dax.h>
73 #include <linux/oom.h>
74 #include <linux/numa.h>
75 #include <linux/perf_event.h>
76 #include <linux/ptrace.h>
77 #include <linux/vmalloc.h>
78 #include <linux/sched/sysctl.h>
79 
80 #include <trace/events/kmem.h>
81 
82 #include <asm/io.h>
83 #include <asm/mmu_context.h>
84 #include <asm/pgalloc.h>
85 #include <linux/uaccess.h>
86 #include <asm/tlb.h>
87 #include <asm/tlbflush.h>
88 
89 #include "pgalloc-track.h"
90 #include "internal.h"
91 #include "swap.h"
92 
93 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
94 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
95 #endif
96 
97 #ifndef CONFIG_NUMA
98 unsigned long max_mapnr;
99 EXPORT_SYMBOL(max_mapnr);
100 
101 struct page *mem_map;
102 EXPORT_SYMBOL(mem_map);
103 #endif
104 
105 static vm_fault_t do_fault(struct vm_fault *vmf);
106 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
107 static bool vmf_pte_changed(struct vm_fault *vmf);
108 
109 /*
110  * Return true if the original pte was a uffd-wp pte marker (so the pte was
111  * wr-protected).
112  */
113 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
114 {
115 	if (!userfaultfd_wp(vmf->vma))
116 		return false;
117 	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
118 		return false;
119 
120 	return pte_marker_uffd_wp(vmf->orig_pte);
121 }
122 
123 /*
124  * A number of key systems in x86 including ioremap() rely on the assumption
125  * that high_memory defines the upper bound on direct map memory, the end
126  * of ZONE_NORMAL.
127  */
128 void *high_memory;
129 EXPORT_SYMBOL(high_memory);
130 
131 /*
132  * Randomize the address space (stacks, mmaps, brk, etc.).
133  *
134  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
135  *   as ancient (libc5 based) binaries can segfault. )
136  */
137 int randomize_va_space __read_mostly =
138 #ifdef CONFIG_COMPAT_BRK
139 					1;
140 #else
141 					2;
142 #endif
143 
144 #ifndef arch_wants_old_prefaulted_pte
145 static inline bool arch_wants_old_prefaulted_pte(void)
146 {
147 	/*
148 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
149 	 * some architectures, even if it's performed in hardware. By
150 	 * default, "false" means prefaulted entries will be 'young'.
151 	 */
152 	return false;
153 }
154 #endif
155 
156 static int __init disable_randmaps(char *s)
157 {
158 	randomize_va_space = 0;
159 	return 1;
160 }
161 __setup("norandmaps", disable_randmaps);
162 
163 unsigned long zero_pfn __read_mostly;
164 EXPORT_SYMBOL(zero_pfn);
165 
166 unsigned long highest_memmap_pfn __read_mostly;
167 
168 /*
169  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
170  */
171 static int __init init_zero_pfn(void)
172 {
173 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
174 	return 0;
175 }
176 early_initcall(init_zero_pfn);
177 
178 void mm_trace_rss_stat(struct mm_struct *mm, int member)
179 {
180 	trace_rss_stat(mm, member);
181 }
182 
183 /*
184  * Note: this doesn't free the actual pages themselves. That
185  * has been handled earlier when unmapping all the memory regions.
186  */
187 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
188 			   unsigned long addr)
189 {
190 	pgtable_t token = pmd_pgtable(*pmd);
191 	pmd_clear(pmd);
192 	pte_free_tlb(tlb, token, addr);
193 	mm_dec_nr_ptes(tlb->mm);
194 }
195 
196 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
197 				unsigned long addr, unsigned long end,
198 				unsigned long floor, unsigned long ceiling)
199 {
200 	pmd_t *pmd;
201 	unsigned long next;
202 	unsigned long start;
203 
204 	start = addr;
205 	pmd = pmd_offset(pud, addr);
206 	do {
207 		next = pmd_addr_end(addr, end);
208 		if (pmd_none_or_clear_bad(pmd))
209 			continue;
210 		free_pte_range(tlb, pmd, addr);
211 	} while (pmd++, addr = next, addr != end);
212 
213 	start &= PUD_MASK;
214 	if (start < floor)
215 		return;
216 	if (ceiling) {
217 		ceiling &= PUD_MASK;
218 		if (!ceiling)
219 			return;
220 	}
221 	if (end - 1 > ceiling - 1)
222 		return;
223 
224 	pmd = pmd_offset(pud, start);
225 	pud_clear(pud);
226 	pmd_free_tlb(tlb, pmd, start);
227 	mm_dec_nr_pmds(tlb->mm);
228 }
229 
230 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
231 				unsigned long addr, unsigned long end,
232 				unsigned long floor, unsigned long ceiling)
233 {
234 	pud_t *pud;
235 	unsigned long next;
236 	unsigned long start;
237 
238 	start = addr;
239 	pud = pud_offset(p4d, addr);
240 	do {
241 		next = pud_addr_end(addr, end);
242 		if (pud_none_or_clear_bad(pud))
243 			continue;
244 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
245 	} while (pud++, addr = next, addr != end);
246 
247 	start &= P4D_MASK;
248 	if (start < floor)
249 		return;
250 	if (ceiling) {
251 		ceiling &= P4D_MASK;
252 		if (!ceiling)
253 			return;
254 	}
255 	if (end - 1 > ceiling - 1)
256 		return;
257 
258 	pud = pud_offset(p4d, start);
259 	p4d_clear(p4d);
260 	pud_free_tlb(tlb, pud, start);
261 	mm_dec_nr_puds(tlb->mm);
262 }
263 
264 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
265 				unsigned long addr, unsigned long end,
266 				unsigned long floor, unsigned long ceiling)
267 {
268 	p4d_t *p4d;
269 	unsigned long next;
270 	unsigned long start;
271 
272 	start = addr;
273 	p4d = p4d_offset(pgd, addr);
274 	do {
275 		next = p4d_addr_end(addr, end);
276 		if (p4d_none_or_clear_bad(p4d))
277 			continue;
278 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
279 	} while (p4d++, addr = next, addr != end);
280 
281 	start &= PGDIR_MASK;
282 	if (start < floor)
283 		return;
284 	if (ceiling) {
285 		ceiling &= PGDIR_MASK;
286 		if (!ceiling)
287 			return;
288 	}
289 	if (end - 1 > ceiling - 1)
290 		return;
291 
292 	p4d = p4d_offset(pgd, start);
293 	pgd_clear(pgd);
294 	p4d_free_tlb(tlb, p4d, start);
295 }
296 
297 /*
298  * This function frees user-level page tables of a process.
299  */
300 void free_pgd_range(struct mmu_gather *tlb,
301 			unsigned long addr, unsigned long end,
302 			unsigned long floor, unsigned long ceiling)
303 {
304 	pgd_t *pgd;
305 	unsigned long next;
306 
307 	/*
308 	 * The next few lines have given us lots of grief...
309 	 *
310 	 * Why are we testing PMD* at this top level?  Because often
311 	 * there will be no work to do at all, and we'd prefer not to
312 	 * go all the way down to the bottom just to discover that.
313 	 *
314 	 * Why all these "- 1"s?  Because 0 represents both the bottom
315 	 * of the address space and the top of it (using -1 for the
316 	 * top wouldn't help much: the masks would do the wrong thing).
317 	 * The rule is that addr 0 and floor 0 refer to the bottom of
318  * the address space, but end 0 and ceiling 0 refer to the top.
319 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
320 	 * that end 0 case should be mythical).
321 	 *
322 	 * Wherever addr is brought up or ceiling brought down, we must
323 	 * be careful to reject "the opposite 0" before it confuses the
324 	 * subsequent tests.  But what about where end is brought down
325 	 * by PMD_SIZE below? no, end can't go down to 0 there.
326 	 *
327 	 * Whereas we round start (addr) and ceiling down, by different
328 	 * masks at different levels, in order to test whether a table
329 	 * now has no other vmas using it, so can be freed, we don't
330 	 * bother to round floor or end up - the tests don't need that.
331 	 */
332 
333 	addr &= PMD_MASK;
334 	if (addr < floor) {
335 		addr += PMD_SIZE;
336 		if (!addr)
337 			return;
338 	}
339 	if (ceiling) {
340 		ceiling &= PMD_MASK;
341 		if (!ceiling)
342 			return;
343 	}
344 	if (end - 1 > ceiling - 1)
345 		end -= PMD_SIZE;
346 	if (addr > end - 1)
347 		return;
348 	/*
349 	 * We add page table cache pages with PAGE_SIZE
350 	 * (see pte_free_tlb()), so flush the TLB if we need to.
351 	 */
352 	tlb_change_page_size(tlb, PAGE_SIZE);
353 	pgd = pgd_offset(tlb->mm, addr);
354 	do {
355 		next = pgd_addr_end(addr, end);
356 		if (pgd_none_or_clear_bad(pgd))
357 			continue;
358 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
359 	} while (pgd++, addr = next, addr != end);
360 }
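/*
 * [Editor's note, not part of the kernel source] A worked example of the
 * floor/ceiling arithmetic above, assuming a 2MB PMD_SIZE:
 *
 *	addr = 0x201000, end = 0x5ff000, floor = 0x200000, ceiling = 0x600000
 *
 *	addr &= PMD_MASK	-> 0x200000 (not below floor, kept)
 *	ceiling &= PMD_MASK	-> 0x600000 (non-zero, kept)
 *	end - 1 > ceiling - 1	-> 0x5fefff > 0x5fffff is false, end kept
 *
 * The "- 1" comparisons are also what make ceiling 0 mean "top of the
 * address space": 0 - 1 wraps to ULONG_MAX, so no end can exceed it and
 * the range is never trimmed against such a ceiling.
 */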
361 
362 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
363 		   struct vm_area_struct *vma, unsigned long floor,
364 		   unsigned long ceiling, bool mm_wr_locked)
365 {
366 	struct unlink_vma_file_batch vb;
367 
368 	do {
369 		unsigned long addr = vma->vm_start;
370 		struct vm_area_struct *next;
371 
372 		/*
373 		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
374 		 * be 0.  This will underflow and is okay.
375 		 */
376 		next = mas_find(mas, ceiling - 1);
377 		if (unlikely(xa_is_zero(next)))
378 			next = NULL;
379 
380 		/*
381 		 * Hide vma from rmap and truncate_pagecache before freeing
382 		 * pgtables
383 		 */
384 		if (mm_wr_locked)
385 			vma_start_write(vma);
386 		unlink_anon_vmas(vma);
387 
388 		if (is_vm_hugetlb_page(vma)) {
389 			unlink_file_vma(vma);
390 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
391 				floor, next ? next->vm_start : ceiling);
392 		} else {
393 			unlink_file_vma_batch_init(&vb);
394 			unlink_file_vma_batch_add(&vb, vma);
395 
396 			/*
397 			 * Optimization: gather nearby vmas into one call down
398 			 */
399 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
400 			       && !is_vm_hugetlb_page(next)) {
401 				vma = next;
402 				next = mas_find(mas, ceiling - 1);
403 				if (unlikely(xa_is_zero(next)))
404 					next = NULL;
405 				if (mm_wr_locked)
406 					vma_start_write(vma);
407 				unlink_anon_vmas(vma);
408 				unlink_file_vma_batch_add(&vb, vma);
409 			}
410 			unlink_file_vma_batch_final(&vb);
411 			free_pgd_range(tlb, addr, vma->vm_end,
412 				floor, next ? next->vm_start : ceiling);
413 		}
414 		vma = next;
415 	} while (vma);
416 }
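/*
 * [Editor's note, illustrative only] The batching loop above chains VMAs
 * whose start lies within PMD_SIZE of the previous VMA's end.  With a 2MB
 * PMD_SIZE, vma1 = [0x400000, 0x480000) and vma2 = [0x4c0000, 0x500000)
 * share the PTE page covering [0x400000, 0x600000), so the single call
 *
 *	free_pgd_range(tlb, 0x400000, 0x500000, floor,
 *		       next ? next->vm_start : ceiling);
 *
 * handles both instead of walking down the page tables once per VMA.
 */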
417 
418 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
419 {
420 	spinlock_t *ptl = pmd_lock(mm, pmd);
421 
422 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
423 		mm_inc_nr_ptes(mm);
424 		/*
425 		 * Ensure all pte setup (e.g. pte page lock and page clearing) is
426 		 * visible before the pte is made visible to other CPUs by being
427 		 * put into page tables.
428 		 *
429 		 * The other side of the story is the pointer chasing in the page
430 		 * table walking code (when walking the page table without locking;
431 		 * ie. most of the time). Fortunately, these data accesses consist
432 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
433 		 * being the notable exception) will already guarantee loads are
434 		 * seen in-order. See the alpha page table accessors for the
435 		 * smp_rmb() barriers in page table walking code.
436 		 */
437 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
438 		pmd_populate(mm, pmd, *pte);
439 		*pte = NULL;
440 	}
441 	spin_unlock(ptl);
442 }
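/*
 * [Editor's note, illustrative sketch] The ordering described in the
 * comment above, written out; the "walker" side is hypothetical shorthand
 * for any lockless page-table walk (e.g. GUP-fast):
 *
 *	CPU 0 (pmd_install)			CPU 1 (lockless walker)
 *	  initialize pte page			  pmdval = READ_ONCE(*pmd);
 *	  smp_wmb();				  if (pmd_none(pmdval))
 *	  pmd_populate(mm, pmd, *pte);			bail out;
 *						  pte = pte_offset_map(...);
 *						  read *pte;  (loads depend on pmdval)
 *
 * The walker's loads form an address dependency (pmd value -> pte page),
 * which every supported architecture except Alpha orders for free; Alpha
 * supplies the matching smp_rmb() in its page-table accessors.
 */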
443 
444 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
445 {
446 	pgtable_t new = pte_alloc_one(mm);
447 	if (!new)
448 		return -ENOMEM;
449 
450 	pmd_install(mm, pmd, &new);
451 	if (new)
452 		pte_free(mm, new);
453 	return 0;
454 }
455 
456 int __pte_alloc_kernel(pmd_t *pmd)
457 {
458 	pte_t *new = pte_alloc_one_kernel(&init_mm);
459 	if (!new)
460 		return -ENOMEM;
461 
462 	spin_lock(&init_mm.page_table_lock);
463 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
464 		smp_wmb(); /* See comment in pmd_install() */
465 		pmd_populate_kernel(&init_mm, pmd, new);
466 		new = NULL;
467 	}
468 	spin_unlock(&init_mm.page_table_lock);
469 	if (new)
470 		pte_free_kernel(&init_mm, new);
471 	return 0;
472 }
473 
474 static inline void init_rss_vec(int *rss)
475 {
476 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
477 }
478 
479 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
480 {
481 	int i;
482 
483 	for (i = 0; i < NR_MM_COUNTERS; i++)
484 		if (rss[i])
485 			add_mm_counter(mm, i, rss[i]);
486 }
487 
488 /*
489  * This function is called to print an error when a bad pte
490  * is found. For example, we might have a PFN-mapped pte in
491  * a region that doesn't allow it.
492  *
493  * The calling function must still handle the error.
494  */
495 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
496 			  pte_t pte, struct page *page)
497 {
498 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
499 	p4d_t *p4d = p4d_offset(pgd, addr);
500 	pud_t *pud = pud_offset(p4d, addr);
501 	pmd_t *pmd = pmd_offset(pud, addr);
502 	struct address_space *mapping;
503 	pgoff_t index;
504 	static unsigned long resume;
505 	static unsigned long nr_shown;
506 	static unsigned long nr_unshown;
507 
508 	/*
509 	 * Allow a burst of 60 reports, then keep quiet for that minute;
510 	 * or allow a steady drip of one report per second.
511 	 */
512 	if (nr_shown == 60) {
513 		if (time_before(jiffies, resume)) {
514 			nr_unshown++;
515 			return;
516 		}
517 		if (nr_unshown) {
518 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
519 				 nr_unshown);
520 			nr_unshown = 0;
521 		}
522 		nr_shown = 0;
523 	}
524 	if (nr_shown++ == 0)
525 		resume = jiffies + 60 * HZ;
526 
527 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
528 	index = linear_page_index(vma, addr);
529 
530 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
531 		 current->comm,
532 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
533 	if (page)
534 		dump_page(page, "bad pte");
535 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
536 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
537 	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
538 		 vma->vm_file,
539 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
540 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
541 		 mapping ? mapping->a_ops->read_folio : NULL);
542 	dump_stack();
543 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
544 }
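/*
 * [Editor's note, illustrative] How the counters above implement the
 * stated policy: 'resume' is set 60*HZ after the first report of a burst.
 * Once 60 reports have been printed, further bad ptes within that minute
 * only bump nr_unshown; when the minute expires, the suppressed count is
 * printed and the burst counter restarts.  If bad ptes arrive at one per
 * second or slower, the 60-report limit is never hit inside the window,
 * so every report is printed - the "steady drip" case.
 */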
545 
546 /*
547  * vm_normal_page -- This function gets the "struct page" associated with a pte.
548  *
549  * "Special" mappings do not wish to be associated with a "struct page" (either
550  * it doesn't exist, or it exists but they don't want to touch it). In this
551  * case, NULL is returned here. "Normal" mappings do have a struct page.
552  *
553  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
554  * pte bit, in which case this function is trivial. Secondly, an architecture
555  * may not have a spare pte bit, which requires a more complicated scheme,
556  * described below.
557  *
558  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
559  * special mapping (even if there are underlying and valid "struct pages").
560  * COWed pages of a VM_PFNMAP are always normal.
561  *
562  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
563  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
564  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
565  * mapping will always honor the rule
566  *
567  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
568  *
569  * And for normal mappings this is false.
570  *
571  * This restricts such mappings to be a linear translation from virtual address
572  * to pfn. To get around this restriction, we allow arbitrary mappings so long
573  * as the vma is not a COW mapping; in that case, we know that all ptes are
574  * special (because none can have been COWed).
575  *
576  *
577  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
578  *
579  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
580  * page" backing, however the difference is that _all_ pages with a struct
581  * page (that is, those where pfn_valid is true) are refcounted and considered
582  * normal pages by the VM. The only exception are zeropages, which are
583  * *never* refcounted.
584  *
585  * The disadvantage is that pages are refcounted (which can be slower and
586  * simply not an option for some PFNMAP users). The advantage is that we
587  * don't have to follow the strict linearity rule of PFNMAP mappings in
588  * order to support COWable mappings.
589  *
590  */
591 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
592 			    pte_t pte)
593 {
594 	unsigned long pfn = pte_pfn(pte);
595 
596 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
597 		if (likely(!pte_special(pte)))
598 			goto check_pfn;
599 		if (vma->vm_ops && vma->vm_ops->find_special_page)
600 			return vma->vm_ops->find_special_page(vma, addr);
601 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
602 			return NULL;
603 		if (is_zero_pfn(pfn))
604 			return NULL;
605 		if (pte_devmap(pte))
606 		/*
607 		 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
608 		 * and will have refcounts incremented on their struct pages
609 		 * when they are inserted into PTEs, thus they are safe to
610 		 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
611 		 * do not have refcounts. Example of legacy ZONE_DEVICE is
612 		 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
613 		 */
614 			return NULL;
615 
616 		print_bad_pte(vma, addr, pte, NULL);
617 		return NULL;
618 	}
619 
620 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
621 
622 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
623 		if (vma->vm_flags & VM_MIXEDMAP) {
624 			if (!pfn_valid(pfn))
625 				return NULL;
626 			if (is_zero_pfn(pfn))
627 				return NULL;
628 			goto out;
629 		} else {
630 			unsigned long off;
631 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
632 			if (pfn == vma->vm_pgoff + off)
633 				return NULL;
634 			if (!is_cow_mapping(vma->vm_flags))
635 				return NULL;
636 		}
637 	}
638 
639 	if (is_zero_pfn(pfn))
640 		return NULL;
641 
642 check_pfn:
643 	if (unlikely(pfn > highest_memmap_pfn)) {
644 		print_bad_pte(vma, addr, pte, NULL);
645 		return NULL;
646 	}
647 
648 	/*
649 	 * NOTE! We still have PageReserved() pages in the page tables.
650 	 * eg. VDSO mappings can cause them to exist.
651 	 */
652 out:
653 	VM_WARN_ON_ONCE(is_zero_pfn(pfn));
654 	return pfn_to_page(pfn);
655 }
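/*
 * [Editor's note, worked example with made-up values] The VM_PFNMAP
 * linearity check above in the !CONFIG_ARCH_HAS_PTE_SPECIAL case:
 * suppose remap_pfn_range() mapped pfn 0x1000 at vma->vm_start, so
 * vma->vm_pgoff == 0x1000.  For addr = vma->vm_start + 2 * PAGE_SIZE:
 *
 *	off = (addr - vma->vm_start) >> PAGE_SHIFT = 2
 *	vma->vm_pgoff + off = 0x1002
 *
 * A pte still pointing at pfn 0x1002 satisfies the rule and is treated as
 * special (NULL is returned).  If the page was COWed, the pte now holds
 * the pfn of the anonymous copy, the equality fails, and the pte is
 * treated as a normal, refcounted page.
 */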
656 
657 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
658 			    pte_t pte)
659 {
660 	struct page *page = vm_normal_page(vma, addr, pte);
661 
662 	if (page)
663 		return page_folio(page);
664 	return NULL;
665 }
666 
667 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
668 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
669 				pmd_t pmd)
670 {
671 	unsigned long pfn = pmd_pfn(pmd);
672 
673 	/* Currently it's only used for huge pfnmaps */
674 	if (unlikely(pmd_special(pmd)))
675 		return NULL;
676 
677 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
678 		if (vma->vm_flags & VM_MIXEDMAP) {
679 			if (!pfn_valid(pfn))
680 				return NULL;
681 			goto out;
682 		} else {
683 			unsigned long off;
684 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
685 			if (pfn == vma->vm_pgoff + off)
686 				return NULL;
687 			if (!is_cow_mapping(vma->vm_flags))
688 				return NULL;
689 		}
690 	}
691 
692 	if (pmd_devmap(pmd))
693 		return NULL;
694 	if (is_huge_zero_pmd(pmd))
695 		return NULL;
696 	if (unlikely(pfn > highest_memmap_pfn))
697 		return NULL;
698 
699 	/*
700 	 * NOTE! We still have PageReserved() pages in the page tables.
701 	 * eg. VDSO mappings can cause them to exist.
702 	 */
703 out:
704 	return pfn_to_page(pfn);
705 }
706 
707 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
708 				  unsigned long addr, pmd_t pmd)
709 {
710 	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
711 
712 	if (page)
713 		return page_folio(page);
714 	return NULL;
715 }
716 #endif
717 
718 static void restore_exclusive_pte(struct vm_area_struct *vma,
719 				  struct page *page, unsigned long address,
720 				  pte_t *ptep)
721 {
722 	struct folio *folio = page_folio(page);
723 	pte_t orig_pte;
724 	pte_t pte;
725 	swp_entry_t entry;
726 
727 	orig_pte = ptep_get(ptep);
728 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
729 	if (pte_swp_soft_dirty(orig_pte))
730 		pte = pte_mksoft_dirty(pte);
731 
732 	entry = pte_to_swp_entry(orig_pte);
733 	if (pte_swp_uffd_wp(orig_pte))
734 		pte = pte_mkuffd_wp(pte);
735 	else if (is_writable_device_exclusive_entry(entry))
736 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
737 
738 	VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
739 					   PageAnonExclusive(page)), folio);
740 
741 	/*
742 	 * No need to take a page reference as one was already
743 	 * created when the swap entry was made.
744 	 */
745 	if (folio_test_anon(folio))
746 		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
747 	else
748 		/*
749 		 * Currently device exclusive access only supports anonymous
750 		 * memory so the entry shouldn't point to a filebacked page.
751 		 */
752 		WARN_ON_ONCE(1);
753 
754 	set_pte_at(vma->vm_mm, address, ptep, pte);
755 
756 	/*
757 	 * No need to invalidate - it was non-present before. However
758 	 * secondary CPUs may have mappings that need invalidating.
759 	 */
760 	update_mmu_cache(vma, address, ptep);
761 }
762 
763 /*
764  * Tries to restore an exclusive pte if the page lock can be acquired without
765  * sleeping.
766  */
767 static int
768 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
769 			unsigned long addr)
770 {
771 	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
772 	struct page *page = pfn_swap_entry_to_page(entry);
773 
774 	if (trylock_page(page)) {
775 		restore_exclusive_pte(vma, page, addr, src_pte);
776 		unlock_page(page);
777 		return 0;
778 	}
779 
780 	return -EBUSY;
781 }
782 
783 /*
784  * Copy one vm_area from one task to the other. Assumes that the page
785  * tables already present in the new task are cleared in the whole range
786  * covered by this vma.
787  */
788 
789 static unsigned long
790 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
791 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
792 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
793 {
794 	unsigned long vm_flags = dst_vma->vm_flags;
795 	pte_t orig_pte = ptep_get(src_pte);
796 	pte_t pte = orig_pte;
797 	struct folio *folio;
798 	struct page *page;
799 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
800 
801 	if (likely(!non_swap_entry(entry))) {
802 		if (swap_duplicate(entry) < 0)
803 			return -EIO;
804 
805 		/* make sure dst_mm is on swapoff's mmlist. */
806 		if (unlikely(list_empty(&dst_mm->mmlist))) {
807 			spin_lock(&mmlist_lock);
808 			if (list_empty(&dst_mm->mmlist))
809 				list_add(&dst_mm->mmlist,
810 						&src_mm->mmlist);
811 			spin_unlock(&mmlist_lock);
812 		}
813 		/* Mark the swap entry as shared. */
814 		if (pte_swp_exclusive(orig_pte)) {
815 			pte = pte_swp_clear_exclusive(orig_pte);
816 			set_pte_at(src_mm, addr, src_pte, pte);
817 		}
818 		rss[MM_SWAPENTS]++;
819 	} else if (is_migration_entry(entry)) {
820 		folio = pfn_swap_entry_folio(entry);
821 
822 		rss[mm_counter(folio)]++;
823 
824 		if (!is_readable_migration_entry(entry) &&
825 				is_cow_mapping(vm_flags)) {
826 			/*
827 			 * COW mappings require pages in both parent and child
828 			 * to be set to read. A previously exclusive entry is
829 			 * now shared.
830 			 */
831 			entry = make_readable_migration_entry(
832 							swp_offset(entry));
833 			pte = swp_entry_to_pte(entry);
834 			if (pte_swp_soft_dirty(orig_pte))
835 				pte = pte_swp_mksoft_dirty(pte);
836 			if (pte_swp_uffd_wp(orig_pte))
837 				pte = pte_swp_mkuffd_wp(pte);
838 			set_pte_at(src_mm, addr, src_pte, pte);
839 		}
840 	} else if (is_device_private_entry(entry)) {
841 		page = pfn_swap_entry_to_page(entry);
842 		folio = page_folio(page);
843 
844 		/*
845 		 * Update rss count even for unaddressable pages, as
846 		 * they should be treated just like normal pages in this
847 		 * respect.
848 		 *
849 		 * We will likely want to have some new rss counters
850 		 * for unaddressable pages, at some point. But for now
851 		 * keep things as they are.
852 		 */
853 		folio_get(folio);
854 		rss[mm_counter(folio)]++;
855 		/* Cannot fail as these pages cannot get pinned. */
856 		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
857 
858 		/*
859 		 * We do not preserve soft-dirty information, because so
860 		 * far, checkpoint/restore is the only feature that
861 		 * requires that. And checkpoint/restore does not work
862 		 * when a device driver is involved (you cannot easily
863 		 * save and restore device driver state).
864 		 */
865 		if (is_writable_device_private_entry(entry) &&
866 		    is_cow_mapping(vm_flags)) {
867 			entry = make_readable_device_private_entry(
868 							swp_offset(entry));
869 			pte = swp_entry_to_pte(entry);
870 			if (pte_swp_uffd_wp(orig_pte))
871 				pte = pte_swp_mkuffd_wp(pte);
872 			set_pte_at(src_mm, addr, src_pte, pte);
873 		}
874 	} else if (is_device_exclusive_entry(entry)) {
875 		/*
876 		 * Make device exclusive entries present by restoring the
877 		 * original entry then copying as for a present pte. Device
878 		 * exclusive entries currently only support private writable
879 		 * (ie. COW) mappings.
880 		 */
881 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
882 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
883 			return -EBUSY;
884 		return -ENOENT;
885 	} else if (is_pte_marker_entry(entry)) {
886 		pte_marker marker = copy_pte_marker(entry, dst_vma);
887 
888 		if (marker)
889 			set_pte_at(dst_mm, addr, dst_pte,
890 				   make_pte_marker(marker));
891 		return 0;
892 	}
893 	if (!userfaultfd_wp(dst_vma))
894 		pte = pte_swp_clear_uffd_wp(pte);
895 	set_pte_at(dst_mm, addr, dst_pte, pte);
896 	return 0;
897 }
898 
899 /*
900  * Copy a present and normal page.
901  *
902  * NOTE! The usual case is that this isn't required;
903  * instead, the caller can just increase the page refcount
904  * and re-use the pte the traditional way.
905  *
906  * And if we need a pre-allocated page but don't yet have
907  * one, return a negative error to let the preallocation
908  * code know so that it can do so outside the page table
909  * lock.
910  */
911 static inline int
912 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
913 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
914 		  struct folio **prealloc, struct page *page)
915 {
916 	struct folio *new_folio;
917 	pte_t pte;
918 
919 	new_folio = *prealloc;
920 	if (!new_folio)
921 		return -EAGAIN;
922 
923 	/*
924 	 * We have a prealloc page, all good!  Take it
925 	 * over and copy the page & arm it.
926 	 */
927 
928 	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
929 		return -EHWPOISON;
930 
931 	*prealloc = NULL;
932 	__folio_mark_uptodate(new_folio);
933 	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
934 	folio_add_lru_vma(new_folio, dst_vma);
935 	rss[MM_ANONPAGES]++;
936 
937 	/* All done, just insert the new page copy in the child */
938 	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
939 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
940 	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
941 		/* Uffd-wp needs to be delivered to dest pte as well */
942 		pte = pte_mkuffd_wp(pte);
943 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
944 	return 0;
945 }
946 
947 static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
948 		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
949 		pte_t pte, unsigned long addr, int nr)
950 {
951 	struct mm_struct *src_mm = src_vma->vm_mm;
952 
953 	/* If it's a COW mapping, write protect it in both processes. */
954 	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
955 		wrprotect_ptes(src_mm, addr, src_pte, nr);
956 		pte = pte_wrprotect(pte);
957 	}
958 
959 	/* If it's a shared mapping, mark it clean in the child. */
960 	if (src_vma->vm_flags & VM_SHARED)
961 		pte = pte_mkclean(pte);
962 	pte = pte_mkold(pte);
963 
964 	if (!userfaultfd_wp(dst_vma))
965 		pte = pte_clear_uffd_wp(pte);
966 
967 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
968 }
969 
970 /*
971  * Copy one present PTE, trying to batch-process subsequent PTEs that map
972  * consecutive pages of the same folio by copying them as well.
973  *
974  * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
975  * Otherwise, returns the number of copied PTEs (at least 1).
976  */
977 static inline int
978 copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
979 		 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
980 		 int max_nr, int *rss, struct folio **prealloc)
981 {
982 	struct page *page;
983 	struct folio *folio;
984 	bool any_writable;
985 	fpb_t flags = 0;
986 	int err, nr;
987 
988 	page = vm_normal_page(src_vma, addr, pte);
989 	if (unlikely(!page))
990 		goto copy_pte;
991 
992 	folio = page_folio(page);
993 
994 	/*
995 	 * If we likely have to copy, just don't bother with batching. Make
996 	 * sure that the common "small folio" case is as fast as possible
997 	 * by keeping the batching logic separate.
998 	 */
999 	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
1000 		if (src_vma->vm_flags & VM_SHARED)
1001 			flags |= FPB_IGNORE_DIRTY;
1002 		if (!vma_soft_dirty_enabled(src_vma))
1003 			flags |= FPB_IGNORE_SOFT_DIRTY;
1004 
1005 		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
1006 				     &any_writable, NULL, NULL);
1007 		folio_ref_add(folio, nr);
1008 		if (folio_test_anon(folio)) {
1009 			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
1010 								  nr, src_vma))) {
1011 				folio_ref_sub(folio, nr);
1012 				return -EAGAIN;
1013 			}
1014 			rss[MM_ANONPAGES] += nr;
1015 			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1016 		} else {
1017 			folio_dup_file_rmap_ptes(folio, page, nr);
1018 			rss[mm_counter_file(folio)] += nr;
1019 		}
1020 		if (any_writable)
1021 			pte = pte_mkwrite(pte, src_vma);
1022 		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
1023 				    addr, nr);
1024 		return nr;
1025 	}
1026 
1027 	folio_get(folio);
1028 	if (folio_test_anon(folio)) {
1029 		/*
1030 		 * If this page may have been pinned by the parent process,
1031 		 * copy the page immediately for the child so that we'll always
1032 		 * guarantee the pinned page won't be randomly replaced in the
1033 		 * future.
1034 		 */
1035 		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
1036 			/* Page may be pinned, we have to copy. */
1037 			folio_put(folio);
1038 			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
1039 						addr, rss, prealloc, page);
1040 			return err ? err : 1;
1041 		}
1042 		rss[MM_ANONPAGES]++;
1043 		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1044 	} else {
1045 		folio_dup_file_rmap_pte(folio, page);
1046 		rss[mm_counter_file(folio)]++;
1047 	}
1048 
1049 copy_pte:
1050 	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
1051 	return 1;
1052 }
1053 
1054 static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
1055 		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
1056 {
1057 	struct folio *new_folio;
1058 
1059 	if (need_zero)
1060 		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
1061 	else
1062 		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
1063 
1064 	if (!new_folio)
1065 		return NULL;
1066 
1067 	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
1068 		folio_put(new_folio);
1069 		return NULL;
1070 	}
1071 	folio_throttle_swaprate(new_folio, GFP_KERNEL);
1072 
1073 	return new_folio;
1074 }
1075 
1076 static int
1077 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1078 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1079 	       unsigned long end)
1080 {
1081 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1082 	struct mm_struct *src_mm = src_vma->vm_mm;
1083 	pte_t *orig_src_pte, *orig_dst_pte;
1084 	pte_t *src_pte, *dst_pte;
1085 	pmd_t dummy_pmdval;
1086 	pte_t ptent;
1087 	spinlock_t *src_ptl, *dst_ptl;
1088 	int progress, max_nr, ret = 0;
1089 	int rss[NR_MM_COUNTERS];
1090 	swp_entry_t entry = (swp_entry_t){0};
1091 	struct folio *prealloc = NULL;
1092 	int nr;
1093 
1094 again:
1095 	progress = 0;
1096 	init_rss_vec(rss);
1097 
1098 	/*
1099 	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
1100 	 * error handling here, assume that exclusive mmap_lock on dst and src
1101 	 * protects anon from unexpected THP transitions; with shmem and file
1102 	 * protected by mmap_lock-less collapse skipping areas with anon_vma
1103 	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
1104 	 * can remove such assumptions later, but this is good enough for now.
1105 	 */
1106 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1107 	if (!dst_pte) {
1108 		ret = -ENOMEM;
1109 		goto out;
1110 	}
1111 
1112 	/*
1113 	 * We already hold the exclusive mmap_lock, the copy_pte_range() and
1114 	 * retract_page_tables() are using vma->anon_vma to be exclusive, so
1115 	 * the PTE page is stable, and there is no need to get pmdval and do
1116 	 * pmd_same() check.
1117 	 */
1118 	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
1119 					   &src_ptl);
1120 	if (!src_pte) {
1121 		pte_unmap_unlock(dst_pte, dst_ptl);
1122 		/* ret == 0 */
1123 		goto out;
1124 	}
1125 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1126 	orig_src_pte = src_pte;
1127 	orig_dst_pte = dst_pte;
1128 	arch_enter_lazy_mmu_mode();
1129 
1130 	do {
1131 		nr = 1;
1132 
1133 		/*
1134 		 * We are holding two locks at this point - either of them
1135 		 * could generate latencies in another task on another CPU.
1136 		 */
1137 		if (progress >= 32) {
1138 			progress = 0;
1139 			if (need_resched() ||
1140 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1141 				break;
1142 		}
1143 		ptent = ptep_get(src_pte);
1144 		if (pte_none(ptent)) {
1145 			progress++;
1146 			continue;
1147 		}
1148 		if (unlikely(!pte_present(ptent))) {
1149 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1150 						  dst_pte, src_pte,
1151 						  dst_vma, src_vma,
1152 						  addr, rss);
1153 			if (ret == -EIO) {
1154 				entry = pte_to_swp_entry(ptep_get(src_pte));
1155 				break;
1156 			} else if (ret == -EBUSY) {
1157 				break;
1158 			} else if (!ret) {
1159 				progress += 8;
1160 				continue;
1161 			}
1162 			ptent = ptep_get(src_pte);
1163 			VM_WARN_ON_ONCE(!pte_present(ptent));
1164 
1165 			/*
1166 			 * Device exclusive entry restored, continue by copying
1167 			 * the now present pte.
1168 			 */
1169 			WARN_ON_ONCE(ret != -ENOENT);
1170 		}
1171 		/* copy_present_ptes() will clear `*prealloc' if consumed */
1172 		max_nr = (end - addr) / PAGE_SIZE;
1173 		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
1174 					ptent, addr, max_nr, rss, &prealloc);
1175 		/*
1176 		 * If we need a pre-allocated page for this pte, drop the
1177 		 * locks, allocate, and try again.
1178 		 * If copy failed due to hwpoison in source page, break out.
1179 		 */
1180 		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
1181 			break;
1182 		if (unlikely(prealloc)) {
1183 			/*
1184 			 * The pre-allocated page cannot be reused for the next
1185 			 * pte, so as to strictly follow mempolicy (e.g.,
1186 			 * alloc_page_vma() allocates the page according to the
1187 			 * address).  This could only happen if one pinned pte changed.
1188 			 */
1189 			folio_put(prealloc);
1190 			prealloc = NULL;
1191 		}
1192 		nr = ret;
1193 		progress += 8 * nr;
1194 	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
1195 		 addr != end);
1196 
1197 	arch_leave_lazy_mmu_mode();
1198 	pte_unmap_unlock(orig_src_pte, src_ptl);
1199 	add_mm_rss_vec(dst_mm, rss);
1200 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1201 	cond_resched();
1202 
1203 	if (ret == -EIO) {
1204 		VM_WARN_ON_ONCE(!entry.val);
1205 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1206 			ret = -ENOMEM;
1207 			goto out;
1208 		}
1209 		entry.val = 0;
1210 	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
1211 		goto out;
1212 	} else if (ret == -EAGAIN) {
1213 		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
1214 		if (!prealloc)
1215 			return -ENOMEM;
1216 	} else if (ret < 0) {
1217 		VM_WARN_ON_ONCE(1);
1218 	}
1219 
1220 	/* We've captured and resolved the error. Reset, try again. */
1221 	ret = 0;
1222 
1223 	if (addr != end)
1224 		goto again;
1225 out:
1226 	if (unlikely(prealloc))
1227 		folio_put(prealloc);
1228 	return ret;
1229 }
1230 
1231 static inline int
1232 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1233 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1234 	       unsigned long end)
1235 {
1236 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1237 	struct mm_struct *src_mm = src_vma->vm_mm;
1238 	pmd_t *src_pmd, *dst_pmd;
1239 	unsigned long next;
1240 
1241 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1242 	if (!dst_pmd)
1243 		return -ENOMEM;
1244 	src_pmd = pmd_offset(src_pud, addr);
1245 	do {
1246 		next = pmd_addr_end(addr, end);
1247 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1248 			|| pmd_devmap(*src_pmd)) {
1249 			int err;
1250 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1251 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1252 					    addr, dst_vma, src_vma);
1253 			if (err == -ENOMEM)
1254 				return -ENOMEM;
1255 			if (!err)
1256 				continue;
1257 			/* fall through */
1258 		}
1259 		if (pmd_none_or_clear_bad(src_pmd))
1260 			continue;
1261 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1262 				   addr, next))
1263 			return -ENOMEM;
1264 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1265 	return 0;
1266 }
1267 
1268 static inline int
1269 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1270 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1271 	       unsigned long end)
1272 {
1273 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1274 	struct mm_struct *src_mm = src_vma->vm_mm;
1275 	pud_t *src_pud, *dst_pud;
1276 	unsigned long next;
1277 
1278 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1279 	if (!dst_pud)
1280 		return -ENOMEM;
1281 	src_pud = pud_offset(src_p4d, addr);
1282 	do {
1283 		next = pud_addr_end(addr, end);
1284 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1285 			int err;
1286 
1287 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1288 			err = copy_huge_pud(dst_mm, src_mm,
1289 					    dst_pud, src_pud, addr, src_vma);
1290 			if (err == -ENOMEM)
1291 				return -ENOMEM;
1292 			if (!err)
1293 				continue;
1294 			/* fall through */
1295 		}
1296 		if (pud_none_or_clear_bad(src_pud))
1297 			continue;
1298 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1299 				   addr, next))
1300 			return -ENOMEM;
1301 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1302 	return 0;
1303 }
1304 
1305 static inline int
1306 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1307 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1308 	       unsigned long end)
1309 {
1310 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1311 	p4d_t *src_p4d, *dst_p4d;
1312 	unsigned long next;
1313 
1314 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1315 	if (!dst_p4d)
1316 		return -ENOMEM;
1317 	src_p4d = p4d_offset(src_pgd, addr);
1318 	do {
1319 		next = p4d_addr_end(addr, end);
1320 		if (p4d_none_or_clear_bad(src_p4d))
1321 			continue;
1322 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1323 				   addr, next))
1324 			return -ENOMEM;
1325 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1326 	return 0;
1327 }
1328 
1329 /*
1330  * Return true if the vma needs to copy the pgtable during this fork().  Return
1331  * false when we can speed up fork() by allowing lazy page faults later until
1332  * when the child accesses the memory range.
1333  */
1334 static bool
1335 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1336 {
1337 	/*
1338 	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
1339 	 * file-backed (e.g. shmem). When uffd-wp is enabled, the pgtable
1340 	 * contains uffd-wp protection information that we can't retrieve
1341 	 * from the page cache, so skipping the copy would lose it.
1342 	 */
1343 	if (userfaultfd_wp(dst_vma))
1344 		return true;
1345 
1346 	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
1347 		return true;
1348 
1349 	if (src_vma->anon_vma)
1350 		return true;
1351 
1352 	/*
1353 	 * Don't copy ptes where a page fault will fill them correctly.  Fork
1354 	 * becomes much lighter when there are big shared or private readonly
1355 	 * mappings. The tradeoff is that copy_page_range is more efficient
1356 	 * than faulting.
1357 	 */
1358 	return false;
1359 }
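/*
 * [Editor's note, illustrative summary] In practice this means a fork()
 * of a process with a large read-only file mapping that was never
 * written (no anon_vma, no uffd-wp, not VM_PFNMAP/VM_MIXEDMAP) skips the
 * page-table copy entirely and lets the child refault lazily, while any
 * vma that already has anonymous pages, pfn mappings or uffd-wp state is
 * copied eagerly by copy_page_range() below.
 */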
1360 
1361 int
1362 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1363 {
1364 	pgd_t *src_pgd, *dst_pgd;
1365 	unsigned long next;
1366 	unsigned long addr = src_vma->vm_start;
1367 	unsigned long end = src_vma->vm_end;
1368 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1369 	struct mm_struct *src_mm = src_vma->vm_mm;
1370 	struct mmu_notifier_range range;
1371 	bool is_cow;
1372 	int ret;
1373 
1374 	if (!vma_needs_copy(dst_vma, src_vma))
1375 		return 0;
1376 
1377 	if (is_vm_hugetlb_page(src_vma))
1378 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1379 
1380 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1381 		/*
1382 		 * We do not free on error cases below as remove_vma
1383 		 * gets called on error from higher level routine
1384 		 */
1385 		ret = track_pfn_copy(src_vma);
1386 		if (ret)
1387 			return ret;
1388 	}
1389 
1390 	/*
1391 	 * We need to invalidate the secondary MMU mappings only when
1392 	 * there could be a permission downgrade on the ptes of the
1393 	 * parent mm. And a permission downgrade will only happen if
1394 	 * is_cow_mapping() returns true.
1395 	 */
1396 	is_cow = is_cow_mapping(src_vma->vm_flags);
1397 
1398 	if (is_cow) {
1399 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1400 					0, src_mm, addr, end);
1401 		mmu_notifier_invalidate_range_start(&range);
1402 		/*
1403 		 * Disabling preemption is not needed for the write side, as
1404 		 * the read side doesn't spin, but goes to the mmap_lock.
1405 		 *
1406 		 * Use the raw variant of the seqcount_t write API to avoid
1407 		 * lockdep complaining about preemptibility.
1408 		 */
1409 		vma_assert_write_locked(src_vma);
1410 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1411 	}
1412 
1413 	ret = 0;
1414 	dst_pgd = pgd_offset(dst_mm, addr);
1415 	src_pgd = pgd_offset(src_mm, addr);
1416 	do {
1417 		next = pgd_addr_end(addr, end);
1418 		if (pgd_none_or_clear_bad(src_pgd))
1419 			continue;
1420 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1421 					    addr, next))) {
1422 			untrack_pfn_clear(dst_vma);
1423 			ret = -ENOMEM;
1424 			break;
1425 		}
1426 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1427 
1428 	if (is_cow) {
1429 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1430 		mmu_notifier_invalidate_range_end(&range);
1431 	}
1432 	return ret;
1433 }
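/*
 * [Editor's note, illustrative] For the COW case the bracketing above
 * nests as:
 *
 *	mmu_notifier_invalidate_range_start(&range);
 *	raw_write_seqcount_begin(&src_mm->write_protect_seq);
 *		... write-protect and copy ptes ...
 *	raw_write_seqcount_end(&src_mm->write_protect_seq);
 *	mmu_notifier_invalidate_range_end(&range);
 *
 * so secondary MMUs observe the permission downgrade, and (to the
 * editor's understanding) lockless readers of write_protect_seq such as
 * the GUP-fast path can detect a concurrent fork and fall back to the
 * mmap_lock rather than spin, matching the comment above.
 */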
1434 
1435 /* Whether we should zap all COWed (private) pages too */
1436 static inline bool should_zap_cows(struct zap_details *details)
1437 {
1438 	/* By default, zap all pages */
1439 	if (!details || details->reclaim_pt)
1440 		return true;
1441 
1442 	/* Or, we zap COWed pages only if the caller wants to */
1443 	return details->even_cows;
1444 }
1445 
1446 /* Decides whether we should zap this folio with the folio pointer specified */
1447 static inline bool should_zap_folio(struct zap_details *details,
1448 				    struct folio *folio)
1449 {
1450 	/* If we can make a decision without *folio.. */
1451 	if (should_zap_cows(details))
1452 		return true;
1453 
1454 	/* Otherwise we should only zap non-anon folios */
1455 	return !folio_test_anon(folio);
1456 }
1457 
1458 static inline bool zap_drop_markers(struct zap_details *details)
1459 {
1460 	if (!details)
1461 		return false;
1462 
1463 	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1464 }
1465 
1466 /*
1467  * This function makes sure that we'll replace the none pte with an uffd-wp
1468  * swap special pte marker when necessary. Must be called with the pgtable lock held.
1469  *
1470  * Returns true if uffd-wp ptes was installed, false otherwise.
1471  * Returns true if uffd-wp ptes were installed, false otherwise.
1472 static inline bool
1473 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1474 			      unsigned long addr, pte_t *pte, int nr,
1475 			      struct zap_details *details, pte_t pteval)
1476 {
1477 	bool was_installed = false;
1478 
1479 #ifdef CONFIG_PTE_MARKER_UFFD_WP
1480 	/* Zap on anonymous always means dropping everything */
1481 	if (vma_is_anonymous(vma))
1482 		return false;
1483 
1484 	if (zap_drop_markers(details))
1485 		return false;
1486 
1487 	for (;;) {
1488 		/* the PFN in the PTE is irrelevant. */
1489 		if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
1490 			was_installed = true;
1491 		if (--nr == 0)
1492 			break;
1493 		pte++;
1494 		addr += PAGE_SIZE;
1495 	}
1496 #endif
1497 	return was_installed;
1498 }
1499 
1500 static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
1501 		struct vm_area_struct *vma, struct folio *folio,
1502 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
1503 		unsigned long addr, struct zap_details *details, int *rss,
1504 		bool *force_flush, bool *force_break, bool *any_skipped)
1505 {
1506 	struct mm_struct *mm = tlb->mm;
1507 	bool delay_rmap = false;
1508 
1509 	if (!folio_test_anon(folio)) {
1510 		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1511 		if (pte_dirty(ptent)) {
1512 			folio_mark_dirty(folio);
1513 			if (tlb_delay_rmap(tlb)) {
1514 				delay_rmap = true;
1515 				*force_flush = true;
1516 			}
1517 		}
1518 		if (pte_young(ptent) && likely(vma_has_recency(vma)))
1519 			folio_mark_accessed(folio);
1520 		rss[mm_counter(folio)] -= nr;
1521 	} else {
1522 		/* We don't need up-to-date accessed/dirty bits. */
1523 		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1524 		rss[MM_ANONPAGES] -= nr;
1525 	}
1526 	/* Checking a single PTE in a batch is sufficient. */
1527 	arch_check_zapped_pte(vma, ptent);
1528 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
1529 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
1530 		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
1531 							     nr, details, ptent);
1532 
1533 	if (!delay_rmap) {
1534 		folio_remove_rmap_ptes(folio, page, nr, vma);
1535 
1536 		if (unlikely(folio_mapcount(folio) < 0))
1537 			print_bad_pte(vma, addr, ptent, page);
1538 	}
1539 	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
1540 		*force_flush = true;
1541 		*force_break = true;
1542 	}
1543 }
1544 
1545 /*
1546  * Zap or skip at least one present PTE, trying to batch-process subsequent
1547  * PTEs that map consecutive pages of the same folio.
1548  *
1549  * Returns the number of processed (skipped or zapped) PTEs (at least 1).
1550  */
1551 static inline int zap_present_ptes(struct mmu_gather *tlb,
1552 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1553 		unsigned int max_nr, unsigned long addr,
1554 		struct zap_details *details, int *rss, bool *force_flush,
1555 		bool *force_break, bool *any_skipped)
1556 {
1557 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
1558 	struct mm_struct *mm = tlb->mm;
1559 	struct folio *folio;
1560 	struct page *page;
1561 	int nr;
1562 
1563 	page = vm_normal_page(vma, addr, ptent);
1564 	if (!page) {
1565 		/* We don't need up-to-date accessed/dirty bits. */
1566 		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
1567 		arch_check_zapped_pte(vma, ptent);
1568 		tlb_remove_tlb_entry(tlb, pte, addr);
1569 		if (userfaultfd_pte_wp(vma, ptent))
1570 			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
1571 						pte, 1, details, ptent);
1572 		ksm_might_unmap_zero_page(mm, ptent);
1573 		return 1;
1574 	}
1575 
1576 	folio = page_folio(page);
1577 	if (unlikely(!should_zap_folio(details, folio))) {
1578 		*any_skipped = true;
1579 		return 1;
1580 	}
1581 
1582 	/*
1583 	 * Make sure that the common "small folio" case is as fast as possible
1584 	 * by keeping the batching logic separate.
1585 	 */
1586 	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
1587 		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
1588 				     NULL, NULL, NULL);
1589 
1590 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
1591 				       addr, details, rss, force_flush,
1592 				       force_break, any_skipped);
1593 		return nr;
1594 	}
1595 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
1596 			       details, rss, force_flush, force_break, any_skipped);
1597 	return 1;
1598 }
1599 
1600 static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
1601 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1602 		unsigned int max_nr, unsigned long addr,
1603 		struct zap_details *details, int *rss, bool *any_skipped)
1604 {
1605 	swp_entry_t entry;
1606 	int nr = 1;
1607 
1608 	*any_skipped = true;
1609 	entry = pte_to_swp_entry(ptent);
1610 	if (is_device_private_entry(entry) ||
1611 		is_device_exclusive_entry(entry)) {
1612 		struct page *page = pfn_swap_entry_to_page(entry);
1613 		struct folio *folio = page_folio(page);
1614 
1615 		if (unlikely(!should_zap_folio(details, folio)))
1616 			return 1;
1617 		/*
1618 		 * Both device private/exclusive mappings should only
1619 		 * work with anonymous pages so far, so we don't need to
1620 		 * consider the uffd-wp bit when zapping. For more information,
1621 		 * see zap_install_uffd_wp_if_needed().
1622 		 */
1623 		WARN_ON_ONCE(!vma_is_anonymous(vma));
1624 		rss[mm_counter(folio)]--;
1625 		if (is_device_private_entry(entry))
1626 			folio_remove_rmap_pte(folio, page, vma);
1627 		folio_put(folio);
1628 	} else if (!non_swap_entry(entry)) {
1629 		/* Genuine swap entries, hence private anon pages */
1630 		if (!should_zap_cows(details))
1631 			return 1;
1632 
1633 		nr = swap_pte_batch(pte, max_nr, ptent);
1634 		rss[MM_SWAPENTS] -= nr;
1635 		free_swap_and_cache_nr(entry, nr);
1636 	} else if (is_migration_entry(entry)) {
1637 		struct folio *folio = pfn_swap_entry_folio(entry);
1638 
1639 		if (!should_zap_folio(details, folio))
1640 			return 1;
1641 		rss[mm_counter(folio)]--;
1642 	} else if (pte_marker_entry_uffd_wp(entry)) {
1643 		/*
1644 		 * For anon: always drop the marker; for file: only
1645 		 * drop the marker if explicitly requested.
1646 		 */
1647 		if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
1648 			return 1;
1649 	} else if (is_guard_swp_entry(entry)) {
1650 		/*
1651 		 * Ordinary zapping should not remove guard PTE
1652 		 * markers. Only do so if we should remove PTE markers
1653 		 * in general.
1654 		 */
1655 		if (!zap_drop_markers(details))
1656 			return 1;
1657 	} else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
1658 		if (!should_zap_cows(details))
1659 			return 1;
1660 	} else {
1661 		/* We should have covered all the swap entry types */
1662 		pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
1663 		WARN_ON_ONCE(1);
1664 	}
1665 	clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
1666 	*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
1667 
1668 	return nr;
1669 }
1670 
1671 static inline int do_zap_pte_range(struct mmu_gather *tlb,
1672 				   struct vm_area_struct *vma, pte_t *pte,
1673 				   unsigned long addr, unsigned long end,
1674 				   struct zap_details *details, int *rss,
1675 				   bool *force_flush, bool *force_break,
1676 				   bool *any_skipped)
1677 {
1678 	pte_t ptent = ptep_get(pte);
1679 	int max_nr = (end - addr) / PAGE_SIZE;
1680 	int nr = 0;
1681 
1682 	/* Skip all consecutive none ptes */
1683 	if (pte_none(ptent)) {
1684 		for (nr = 1; nr < max_nr; nr++) {
1685 			ptent = ptep_get(pte + nr);
1686 			if (!pte_none(ptent))
1687 				break;
1688 		}
1689 		max_nr -= nr;
1690 		if (!max_nr)
1691 			return nr;
1692 		pte += nr;
1693 		addr += nr * PAGE_SIZE;
1694 	}
1695 
1696 	if (pte_present(ptent))
1697 		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
1698 				       details, rss, force_flush, force_break,
1699 				       any_skipped);
1700 	else
1701 		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
1702 					  details, rss, any_skipped);
1703 
1704 	return nr;
1705 }
1706 
1707 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1708 				struct vm_area_struct *vma, pmd_t *pmd,
1709 				unsigned long addr, unsigned long end,
1710 				struct zap_details *details)
1711 {
1712 	bool force_flush = false, force_break = false;
1713 	struct mm_struct *mm = tlb->mm;
1714 	int rss[NR_MM_COUNTERS];
1715 	spinlock_t *ptl;
1716 	pte_t *start_pte;
1717 	pte_t *pte;
1718 	pmd_t pmdval;
1719 	unsigned long start = addr;
1720 	bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
1721 	bool direct_reclaim = true;
1722 	int nr;
1723 
1724 retry:
1725 	tlb_change_page_size(tlb, PAGE_SIZE);
1726 	init_rss_vec(rss);
1727 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1728 	if (!pte)
1729 		return addr;
1730 
1731 	flush_tlb_batched_pending(mm);
1732 	arch_enter_lazy_mmu_mode();
1733 	do {
1734 		bool any_skipped = false;
1735 
1736 		if (need_resched()) {
1737 			direct_reclaim = false;
1738 			break;
1739 		}
1740 
1741 		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
1742 				      &force_flush, &force_break, &any_skipped);
1743 		if (any_skipped)
1744 			can_reclaim_pt = false;
1745 		if (unlikely(force_break)) {
1746 			addr += nr * PAGE_SIZE;
1747 			direct_reclaim = false;
1748 			break;
1749 		}
1750 	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
1751 
1752 	/*
1753 	 * Fast path: try to hold the pmd lock and unmap the PTE page.
1754 	 *
1755 	 * If the pte lock was released midway (retry case), or if the attempt
1756 	 * to hold the pmd lock failed, then we need to recheck all pte entries
1757 	 * to ensure they are still none, thereby preventing the pte entries
1758 	 * from being repopulated by another thread.
1759 	 */
1760 	if (can_reclaim_pt && direct_reclaim && addr == end)
1761 		direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
1762 
1763 	add_mm_rss_vec(mm, rss);
1764 	arch_leave_lazy_mmu_mode();
1765 
1766 	/* Do the actual TLB flush before dropping ptl */
1767 	if (force_flush) {
1768 		tlb_flush_mmu_tlbonly(tlb);
1769 		tlb_flush_rmaps(tlb, vma);
1770 	}
1771 	pte_unmap_unlock(start_pte, ptl);
1772 
1773 	/*
1774 	 * If we forced a TLB flush (either due to running out of
1775 	 * batch buffers or because we needed to flush dirty TLB
1776 	 * entries before releasing the ptl), free the batched
1777 	 * memory too. Come back again if we didn't do everything.
1778 	 */
1779 	if (force_flush)
1780 		tlb_flush_mmu(tlb);
1781 
1782 	if (addr != end) {
1783 		cond_resched();
1784 		force_flush = false;
1785 		force_break = false;
1786 		goto retry;
1787 	}
1788 
1789 	if (can_reclaim_pt) {
1790 		if (direct_reclaim)
1791 			free_pte(mm, start, tlb, pmdval);
1792 		else
1793 			try_to_free_pte(mm, pmd, start, tlb);
1794 	}
1795 
1796 	return addr;
1797 }
1798 
1799 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1800 				struct vm_area_struct *vma, pud_t *pud,
1801 				unsigned long addr, unsigned long end,
1802 				struct zap_details *details)
1803 {
1804 	pmd_t *pmd;
1805 	unsigned long next;
1806 
1807 	pmd = pmd_offset(pud, addr);
1808 	do {
1809 		next = pmd_addr_end(addr, end);
1810 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1811 			if (next - addr != HPAGE_PMD_SIZE)
1812 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1813 			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1814 				addr = next;
1815 				continue;
1816 			}
1817 			/* fall through */
1818 		} else if (details && details->single_folio &&
1819 			   folio_test_pmd_mappable(details->single_folio) &&
1820 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1821 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1822 			/*
1823 			 * Take and drop THP pmd lock so that we cannot return
1824 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1825 			 * but not yet decremented compound_mapcount().
1826 			 */
1827 			spin_unlock(ptl);
1828 		}
1829 		if (pmd_none(*pmd)) {
1830 			addr = next;
1831 			continue;
1832 		}
1833 		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1834 		if (addr != next)
1835 			pmd--;
1836 	} while (pmd++, cond_resched(), addr != end);
1837 
1838 	return addr;
1839 }
1840 
1841 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1842 				struct vm_area_struct *vma, p4d_t *p4d,
1843 				unsigned long addr, unsigned long end,
1844 				struct zap_details *details)
1845 {
1846 	pud_t *pud;
1847 	unsigned long next;
1848 
1849 	pud = pud_offset(p4d, addr);
1850 	do {
1851 		next = pud_addr_end(addr, end);
1852 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1853 			if (next - addr != HPAGE_PUD_SIZE) {
1854 				mmap_assert_locked(tlb->mm);
1855 				split_huge_pud(vma, pud, addr);
1856 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1857 				goto next;
1858 			/* fall through */
1859 		}
1860 		if (pud_none_or_clear_bad(pud))
1861 			continue;
1862 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1863 next:
1864 		cond_resched();
1865 	} while (pud++, addr = next, addr != end);
1866 
1867 	return addr;
1868 }
1869 
1870 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1871 				struct vm_area_struct *vma, pgd_t *pgd,
1872 				unsigned long addr, unsigned long end,
1873 				struct zap_details *details)
1874 {
1875 	p4d_t *p4d;
1876 	unsigned long next;
1877 
1878 	p4d = p4d_offset(pgd, addr);
1879 	do {
1880 		next = p4d_addr_end(addr, end);
1881 		if (p4d_none_or_clear_bad(p4d))
1882 			continue;
1883 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1884 	} while (p4d++, addr = next, addr != end);
1885 
1886 	return addr;
1887 }
1888 
1889 void unmap_page_range(struct mmu_gather *tlb,
1890 			     struct vm_area_struct *vma,
1891 			     unsigned long addr, unsigned long end,
1892 			     struct zap_details *details)
1893 {
1894 	pgd_t *pgd;
1895 	unsigned long next;
1896 
1897 	BUG_ON(addr >= end);
1898 	tlb_start_vma(tlb, vma);
1899 	pgd = pgd_offset(vma->vm_mm, addr);
1900 	do {
1901 		next = pgd_addr_end(addr, end);
1902 		if (pgd_none_or_clear_bad(pgd))
1903 			continue;
1904 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1905 	} while (pgd++, addr = next, addr != end);
1906 	tlb_end_vma(tlb, vma);
1907 }
1908 
1909 
1910 static void unmap_single_vma(struct mmu_gather *tlb,
1911 		struct vm_area_struct *vma, unsigned long start_addr,
1912 		unsigned long end_addr,
1913 		struct zap_details *details, bool mm_wr_locked)
1914 {
1915 	unsigned long start = max(vma->vm_start, start_addr);
1916 	unsigned long end;
1917 
1918 	if (start >= vma->vm_end)
1919 		return;
1920 	end = min(vma->vm_end, end_addr);
1921 	if (end <= vma->vm_start)
1922 		return;
1923 
1924 	if (vma->vm_file)
1925 		uprobe_munmap(vma, start, end);
1926 
1927 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1928 		untrack_pfn(vma, 0, 0, mm_wr_locked);
1929 
1930 	if (start != end) {
1931 		if (unlikely(is_vm_hugetlb_page(vma))) {
1932 			/*
1933 			 * It is undesirable to test vma->vm_file as it
1934 			 * should be non-null for a valid hugetlb area.
1935 			 * However, vm_file will be NULL in the error
1936 			 * cleanup path of mmap_region. When
1937 			 * hugetlbfs ->mmap method fails,
1938 			 * mmap_region() nullifies vma->vm_file
1939 			 * before calling this function to clean up.
1940 			 * Since no pte has actually been set up, it is
1941 			 * safe to do nothing in this case.
1942 			 */
1943 			if (vma->vm_file) {
1944 				zap_flags_t zap_flags = details ?
1945 				    details->zap_flags : 0;
1946 				__unmap_hugepage_range(tlb, vma, start, end,
1947 							     NULL, zap_flags);
1948 			}
1949 		} else
1950 			unmap_page_range(tlb, vma, start, end, details);
1951 	}
1952 }
1953 
1954 /**
1955  * unmap_vmas - unmap a range of memory covered by a list of vma's
1956  * @tlb: address of the caller's struct mmu_gather
1957  * @mas: the maple state
1958  * @vma: the starting vma
1959  * @start_addr: virtual address at which to start unmapping
1960  * @end_addr: virtual address at which to end unmapping
1961  * @tree_end: The maximum index to check
1962  * @mm_wr_locked: lock flag
1963  * @mm_wr_locked: true if the mmap_lock is held for writing
1964  * Unmap all pages in the vma list.
1965  *
1966  * Only addresses between `start' and `end' will be unmapped.
1967  *
1968  * The VMA list must be sorted in ascending virtual address order.
1969  *
1970  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1971  * range after unmap_vmas() returns.  So the only responsibility here is to
1972  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1973  * drops the lock and schedules.
1974  */
1975 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1976 		struct vm_area_struct *vma, unsigned long start_addr,
1977 		unsigned long end_addr, unsigned long tree_end,
1978 		bool mm_wr_locked)
1979 {
1980 	struct mmu_notifier_range range;
1981 	struct zap_details details = {
1982 		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
1983 		/* Careful - we need to zap private pages too! */
1984 		.even_cows = true,
1985 	};
1986 
1987 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1988 				start_addr, end_addr);
1989 	mmu_notifier_invalidate_range_start(&range);
1990 	do {
1991 		unsigned long start = start_addr;
1992 		unsigned long end = end_addr;
1993 		hugetlb_zap_begin(vma, &start, &end);
1994 		unmap_single_vma(tlb, vma, start, end, &details,
1995 				 mm_wr_locked);
1996 		hugetlb_zap_end(vma, &details);
1997 		vma = mas_find(mas, tree_end - 1);
1998 	} while (vma && likely(!xa_is_zero(vma)));
1999 	mmu_notifier_invalidate_range_end(&range);
2000 }
2001 
2002 /**
2003  * zap_page_range_single - remove user pages in a given range
2004  * @vma: vm_area_struct holding the applicable pages
2005  * @address: starting address of pages to zap
2006  * @size: number of bytes to zap
2007  * @details: details of shared cache invalidation
2008  *
2009  * The range must fit into one VMA.
2010  */
2011 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2012 		unsigned long size, struct zap_details *details)
2013 {
2014 	const unsigned long end = address + size;
2015 	struct mmu_notifier_range range;
2016 	struct mmu_gather tlb;
2017 
2018 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2019 				address, end);
2020 	hugetlb_zap_begin(vma, &range.start, &range.end);
2021 	tlb_gather_mmu(&tlb, vma->vm_mm);
2022 	update_hiwater_rss(vma->vm_mm);
2023 	mmu_notifier_invalidate_range_start(&range);
2024 	/*
2025 	 * unmap 'address-end' not 'range.start-range.end' as range
2026 	 * could have been expanded for hugetlb pmd sharing.
2027 	 */
2028 	unmap_single_vma(&tlb, vma, address, end, details, false);
2029 	mmu_notifier_invalidate_range_end(&range);
2030 	tlb_finish_mmu(&tlb);
2031 	hugetlb_zap_end(vma, details);
2032 }
2033 
2034 /**
2035  * zap_vma_ptes - remove ptes mapping the vma
2036  * @vma: vm_area_struct holding ptes to be zapped
2037  * @address: starting address of pages to zap
2038  * @size: number of bytes to zap
2039  *
2040  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
2041  *
2042  * The entire address range must be fully contained within the vma.
2043  *
2044  */
2045 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2046 		unsigned long size)
2047 {
2048 	if (!range_in_vma(vma, address, address + size) ||
2049 	    !(vma->vm_flags & VM_PFNMAP))
2050 		return;
2051 
2052 	zap_page_range_single(vma, address, size, NULL);
2053 }
2054 EXPORT_SYMBOL_GPL(zap_vma_ptes);
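/*
 * Editor's illustrative sketch (not part of memory.c): a hypothetical driver
 * that exposed device memory through a VM_PFNMAP mapping and wants to revoke
 * the user's PTEs when the underlying window goes away. All "my_" names are
 * assumptions for the example; only zap_vma_ptes() is the real API above.
 * Callers are expected to keep the VMA stable (e.g. hold the mmap_lock).
 */
struct my_window {
	struct vm_area_struct *vma;	/* PFNMAP vma set up in ->mmap */
	unsigned long size;		/* bytes mapped, page aligned */
};

static void my_window_revoke(struct my_window *win)
{
	/*
	 * The range must lie entirely within the VMA and the VMA must be
	 * VM_PFNMAP, otherwise zap_vma_ptes() silently does nothing.
	 */
	zap_vma_ptes(win->vma, win->vma->vm_start, win->size);
}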
2055 
2056 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
2057 {
2058 	pgd_t *pgd;
2059 	p4d_t *p4d;
2060 	pud_t *pud;
2061 	pmd_t *pmd;
2062 
2063 	pgd = pgd_offset(mm, addr);
2064 	p4d = p4d_alloc(mm, pgd, addr);
2065 	if (!p4d)
2066 		return NULL;
2067 	pud = pud_alloc(mm, p4d, addr);
2068 	if (!pud)
2069 		return NULL;
2070 	pmd = pmd_alloc(mm, pud, addr);
2071 	if (!pmd)
2072 		return NULL;
2073 
2074 	VM_BUG_ON(pmd_trans_huge(*pmd));
2075 	return pmd;
2076 }
2077 
2078 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2079 			spinlock_t **ptl)
2080 {
2081 	pmd_t *pmd = walk_to_pmd(mm, addr);
2082 
2083 	if (!pmd)
2084 		return NULL;
2085 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
2086 }
2087 
2088 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
2089 {
2090 	VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
2091 	/*
2092 	 * Whoever wants to forbid the zeropage after some zeropages
2093 	 * might already have been mapped has to scan the page tables and
2094 	 * bail out on any zeropages. Zeropages in COW mappings can
2095 	 * be unshared using FAULT_FLAG_UNSHARE faults.
2096 	 */
2097 	if (mm_forbids_zeropage(vma->vm_mm))
2098 		return false;
2099 	/* zeropages in COW mappings are common and unproblematic. */
2100 	if (is_cow_mapping(vma->vm_flags))
2101 		return true;
2102 	/* Mappings that do not allow for writable PTEs are unproblematic. */
2103 	if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
2104 		return true;
2105 	/*
2106 	 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
2107 	 * find the shared zeropage and longterm-pin it, which would
2108 	 * be problematic as soon as the zeropage gets replaced by a different
2109 	 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
2110  * now differ from what GUP looked up. FSDAX is incompatible with
2111  * FOLL_LONGTERM and VM_IO is incompatible with GUP completely (see
2112 	 * check_vma_flags).
2113 	 */
2114 	return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
2115 	       (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
2116 }
2117 
2118 static int validate_page_before_insert(struct vm_area_struct *vma,
2119 				       struct page *page)
2120 {
2121 	struct folio *folio = page_folio(page);
2122 
2123 	if (!folio_ref_count(folio))
2124 		return -EINVAL;
2125 	if (unlikely(is_zero_folio(folio))) {
2126 		if (!vm_mixed_zeropage_allowed(vma))
2127 			return -EINVAL;
2128 		return 0;
2129 	}
2130 	if (folio_test_anon(folio) || folio_test_slab(folio) ||
2131 	    page_has_type(page))
2132 		return -EINVAL;
2133 	flush_dcache_folio(folio);
2134 	return 0;
2135 }
2136 
2137 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
2138 			unsigned long addr, struct page *page, pgprot_t prot)
2139 {
2140 	struct folio *folio = page_folio(page);
2141 	pte_t pteval;
2142 
2143 	if (!pte_none(ptep_get(pte)))
2144 		return -EBUSY;
2145 	/* Ok, finally just insert the thing.. */
2146 	pteval = mk_pte(page, prot);
2147 	if (unlikely(is_zero_folio(folio))) {
2148 		pteval = pte_mkspecial(pteval);
2149 	} else {
2150 		folio_get(folio);
2151 		inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
2152 		folio_add_file_rmap_pte(folio, page, vma);
2153 	}
2154 	set_pte_at(vma->vm_mm, addr, pte, pteval);
2155 	return 0;
2156 }
2157 
2158 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2159 			struct page *page, pgprot_t prot)
2160 {
2161 	int retval;
2162 	pte_t *pte;
2163 	spinlock_t *ptl;
2164 
2165 	retval = validate_page_before_insert(vma, page);
2166 	if (retval)
2167 		goto out;
2168 	retval = -ENOMEM;
2169 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
2170 	if (!pte)
2171 		goto out;
2172 	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
2173 	pte_unmap_unlock(pte, ptl);
2174 out:
2175 	return retval;
2176 }
2177 
2178 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
2179 			unsigned long addr, struct page *page, pgprot_t prot)
2180 {
2181 	int err;
2182 
2183 	err = validate_page_before_insert(vma, page);
2184 	if (err)
2185 		return err;
2186 	return insert_page_into_pte_locked(vma, pte, addr, page, prot);
2187 }
2188 
2189 /* insert_pages() amortizes the cost of spinlock operations
2190  * when inserting pages in a loop.
2191  */
2192 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
2193 			struct page **pages, unsigned long *num, pgprot_t prot)
2194 {
2195 	pmd_t *pmd = NULL;
2196 	pte_t *start_pte, *pte;
2197 	spinlock_t *pte_lock;
2198 	struct mm_struct *const mm = vma->vm_mm;
2199 	unsigned long curr_page_idx = 0;
2200 	unsigned long remaining_pages_total = *num;
2201 	unsigned long pages_to_write_in_pmd;
2202 	int ret;
2203 more:
2204 	ret = -EFAULT;
2205 	pmd = walk_to_pmd(mm, addr);
2206 	if (!pmd)
2207 		goto out;
2208 
2209 	pages_to_write_in_pmd = min_t(unsigned long,
2210 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
2211 
2212 	/* Allocate the PTE if necessary; takes PMD lock once only. */
2213 	ret = -ENOMEM;
2214 	if (pte_alloc(mm, pmd))
2215 		goto out;
2216 
2217 	while (pages_to_write_in_pmd) {
2218 		int pte_idx = 0;
2219 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
2220 
2221 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2222 		if (!start_pte) {
2223 			ret = -EFAULT;
2224 			goto out;
2225 		}
2226 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
2227 			int err = insert_page_in_batch_locked(vma, pte,
2228 				addr, pages[curr_page_idx], prot);
2229 			if (unlikely(err)) {
2230 				pte_unmap_unlock(start_pte, pte_lock);
2231 				ret = err;
2232 				remaining_pages_total -= pte_idx;
2233 				goto out;
2234 			}
2235 			addr += PAGE_SIZE;
2236 			++curr_page_idx;
2237 		}
2238 		pte_unmap_unlock(start_pte, pte_lock);
2239 		pages_to_write_in_pmd -= batch_size;
2240 		remaining_pages_total -= batch_size;
2241 	}
2242 	if (remaining_pages_total)
2243 		goto more;
2244 	ret = 0;
2245 out:
2246 	*num = remaining_pages_total;
2247 	return ret;
2248 }
2249 
2250 /**
2251  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2252  * @vma: user vma to map to
2253  * @addr: target start user address of these pages
2254  * @pages: source kernel pages
2255  * @num: in: number of pages to map. out: number of pages that were *not*
2256  * mapped. (0 means all pages were successfully mapped).
2257  *
2258  * Preferred over vm_insert_page() when inserting multiple pages.
2259  *
2260  * In case of error, we may have mapped a subset of the provided
2261  * pages. It is the caller's responsibility to account for this case.
2262  *
2263  * The same restrictions apply as in vm_insert_page().
2264  */
2265 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2266 			struct page **pages, unsigned long *num)
2267 {
2268 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
2269 
2270 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
2271 		return -EFAULT;
2272 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2273 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2274 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2275 		vm_flags_set(vma, VM_MIXEDMAP);
2276 	}
2277 	/* Defer page refcount checking till we're about to map that page. */
2278 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
2279 }
2280 EXPORT_SYMBOL(vm_insert_pages);
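/*
 * Editor's illustrative sketch (not part of memory.c): how a hypothetical
 * driver ->mmap handler might use vm_insert_pages() to map a preallocated
 * page array in one batched call. "my_buffer", its fields and my_buffer_mmap
 * are assumptions for the example.
 */
struct my_buffer {
	struct page **pages;		/* kernel-allocated, refcounted pages */
	unsigned long nr_pages;
};

static int my_buffer_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_buffer *buf = file->private_data;
	unsigned long num = vma_pages(vma);

	if (num > buf->nr_pages)
		return -EINVAL;

	/*
	 * On return, 'num' holds the number of pages that were NOT mapped;
	 * on error some pages may already be mapped, which the mmap error
	 * path tearing down the VMA will clean up.
	 */
	return vm_insert_pages(vma, vma->vm_start, buf->pages, &num);
}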
2281 
2282 /**
2283  * vm_insert_page - insert single page into user vma
2284  * @vma: user vma to map to
2285  * @addr: target user address of this page
2286  * @page: source kernel page
2287  *
2288  * This allows drivers to insert individual pages they've allocated
2289  * into a user vma. The zeropage is supported in some VMAs,
2290  * see vm_mixed_zeropage_allowed().
2291  *
2292  * The page has to be a nice clean _individual_ kernel allocation.
2293  * If you allocate a compound page, you need to have marked it as
2294  * such (__GFP_COMP), or manually just split the page up yourself
2295  * (see split_page()).
2296  *
2297  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2298  * took an arbitrary page protection parameter. This doesn't allow
2299  * that. Your vma protection will have to be set up correctly, which
2300  * means that if you want a shared writable mapping, you'd better
2301  * ask for a shared writable mapping!
2302  *
2303  * The page does not need to be reserved.
2304  *
2305  * Usually this function is called from f_op->mmap() handler
2306  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2307  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2308  * function from other places, for example from page-fault handler.
2309  *
2310  * Return: %0 on success, negative error code otherwise.
2311  */
2312 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2313 			struct page *page)
2314 {
2315 	if (addr < vma->vm_start || addr >= vma->vm_end)
2316 		return -EFAULT;
2317 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2318 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2319 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2320 		vm_flags_set(vma, VM_MIXEDMAP);
2321 	}
2322 	return insert_page(vma, addr, page, vma->vm_page_prot);
2323 }
2324 EXPORT_SYMBOL(vm_insert_page);
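/*
 * Editor's illustrative sketch (not part of memory.c): the single-page
 * variant from a hypothetical ->mmap handler. Because ->mmap runs under the
 * mmap_lock held for writing, vm_insert_page() is allowed to set VM_MIXEDMAP
 * on the vma itself. my_page_mmap is an assumption for the example.
 */
static int my_page_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = file->private_data;	/* one clean kernel page */

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	return vm_insert_page(vma, vma->vm_start, page);
}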
2325 
2326 /*
2327  * __vm_map_pages - maps range of kernel pages into user vma
2328  * @vma: user vma to map to
2329  * @pages: pointer to array of source kernel pages
2330  * @num: number of pages in page array
2331  * @offset: user's requested vm_pgoff
2332  *
2333  * This allows drivers to map range of kernel pages into a user vma.
2334  * The zeropage is supported in some VMAs, see
2335  * vm_mixed_zeropage_allowed().
2336  *
2337  * Return: 0 on success and error code otherwise.
2338  */
2339 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2340 				unsigned long num, unsigned long offset)
2341 {
2342 	unsigned long count = vma_pages(vma);
2343 	unsigned long uaddr = vma->vm_start;
2344 	int ret, i;
2345 
2346 	/* Fail if the user requested offset is beyond the end of the object */
2347 	if (offset >= num)
2348 		return -ENXIO;
2349 
2350 	/* Fail if the user requested size exceeds available object size */
2351 	if (count > num - offset)
2352 		return -ENXIO;
2353 
2354 	for (i = 0; i < count; i++) {
2355 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2356 		if (ret < 0)
2357 			return ret;
2358 		uaddr += PAGE_SIZE;
2359 	}
2360 
2361 	return 0;
2362 }
2363 
2364 /**
2365  * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2366  * @vma: user vma to map to
2367  * @pages: pointer to array of source kernel pages
2368  * @num: number of pages in page array
2369  *
2370  * Maps an object consisting of @num pages, catering for the user's
2371  * requested vm_pgoff
2372  *
2373  * If we fail to insert any page into the vma, the function will return
2374  * immediately leaving any previously inserted pages present.  Callers
2375  * from the mmap handler may immediately return the error as their caller
2376  * will destroy the vma, removing any successfully inserted pages. Other
2377  * callers should make their own arrangements for calling unmap_region().
2378  *
2379  * Context: Process context. Called by mmap handlers.
2380  * Return: 0 on success and error code otherwise.
2381  */
2382 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2383 				unsigned long num)
2384 {
2385 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2386 }
2387 EXPORT_SYMBOL(vm_map_pages);
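/*
 * Editor's illustrative sketch (not part of memory.c): with vm_map_pages() a
 * hypothetical driver hands over its whole page array and lets the core
 * validate the user's requested vm_pgoff and mapping length. "my_ring" and
 * my_ring_mmap are assumptions for the example.
 */
struct my_ring {
	struct page **pages;
	unsigned long nr_pages;
};

static int my_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_ring *ring = file->private_data;

	/* Offset (vm_pgoff) and size checks happen inside vm_map_pages(). */
	return vm_map_pages(vma, ring->pages, ring->nr_pages);
}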
2388 
2389 /**
2390  * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2391  * @vma: user vma to map to
2392  * @pages: pointer to array of source kernel pages
2393  * @num: number of pages in page array
2394  *
2395  * Similar to vm_map_pages(), except that it explicitly sets the offset
2396  * to 0. This function is intended for the drivers that did not consider
2397  * vm_pgoff.
2398  *
2399  * Context: Process context. Called by mmap handlers.
2400  * Return: 0 on success and error code otherwise.
2401  */
2402 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2403 				unsigned long num)
2404 {
2405 	return __vm_map_pages(vma, pages, num, 0);
2406 }
2407 EXPORT_SYMBOL(vm_map_pages_zero);
2408 
2409 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2410 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2411 {
2412 	struct mm_struct *mm = vma->vm_mm;
2413 	pte_t *pte, entry;
2414 	spinlock_t *ptl;
2415 
2416 	pte = get_locked_pte(mm, addr, &ptl);
2417 	if (!pte)
2418 		return VM_FAULT_OOM;
2419 	entry = ptep_get(pte);
2420 	if (!pte_none(entry)) {
2421 		if (mkwrite) {
2422 			/*
2423 			 * For read faults on private mappings the PFN passed
2424 			 * in may not match the PFN we have mapped if the
2425 			 * mapped PFN is a writeable COW page.  In the mkwrite
2426 			 * case we are creating a writable PTE for a shared
2427 			 * mapping and we expect the PFNs to match. If they
2428 			 * don't match, we are likely racing with block
2429 			 * allocation and mapping invalidation so just skip the
2430 			 * update.
2431 			 */
2432 			if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2433 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2434 				goto out_unlock;
2435 			}
2436 			entry = pte_mkyoung(entry);
2437 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2438 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2439 				update_mmu_cache(vma, addr, pte);
2440 		}
2441 		goto out_unlock;
2442 	}
2443 
2444 	/* Ok, finally just insert the thing.. */
2445 	if (pfn_t_devmap(pfn))
2446 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2447 	else
2448 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2449 
2450 	if (mkwrite) {
2451 		entry = pte_mkyoung(entry);
2452 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2453 	}
2454 
2455 	set_pte_at(mm, addr, pte, entry);
2456 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2457 
2458 out_unlock:
2459 	pte_unmap_unlock(pte, ptl);
2460 	return VM_FAULT_NOPAGE;
2461 }
2462 
2463 /**
2464  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2465  * @vma: user vma to map to
2466  * @addr: target user address of this page
2467  * @pfn: source kernel pfn
2468  * @pgprot: pgprot flags for the inserted page
2469  *
2470  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2471  * to override pgprot on a per-page basis.
2472  *
2473  * This only makes sense for IO mappings, and it makes no sense for
2474  * COW mappings.  In general, using multiple vmas is preferable;
2475  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2476  * impractical.
2477  *
2478  * pgprot typically only differs from @vma->vm_page_prot when drivers set
2479  * caching- and encryption bits different than those of @vma->vm_page_prot,
2480  * because the caching- or encryption mode may not be known at mmap() time.
2481  *
2482  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2483  * to set caching and encryption bits for those vmas (except for COW pages).
2484  * This is ensured by core vm only modifying these page table entries using
2485  * functions that don't touch caching- or encryption bits, using pte_modify()
2486  * if needed. (See for example mprotect()).
2487  *
2488  * Also when new page-table entries are created, this is only done using the
2489  * fault() callback, and never using the value of vma->vm_page_prot,
2490  * except for page-table entries that point to anonymous pages as the result
2491  * of COW.
2492  *
2493  * Context: Process context.  May allocate using %GFP_KERNEL.
2494  * Return: vm_fault_t value.
2495  */
2496 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2497 			unsigned long pfn, pgprot_t pgprot)
2498 {
2499 	/*
2500 	 * Technically, architectures with pte_special can avoid all these
2501 	 * restrictions (same for remap_pfn_range).  However we would like
2502 	 * consistency in testing and feature parity among all, so we should
2503 	 * try to keep these invariants in place for everybody.
2504 	 */
2505 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2506 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2507 						(VM_PFNMAP|VM_MIXEDMAP));
2508 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2509 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2510 
2511 	if (addr < vma->vm_start || addr >= vma->vm_end)
2512 		return VM_FAULT_SIGBUS;
2513 
2514 	if (!pfn_modify_allowed(pfn, pgprot))
2515 		return VM_FAULT_SIGBUS;
2516 
2517 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2518 
2519 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2520 			false);
2521 }
2522 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2523 
2524 /**
2525  * vmf_insert_pfn - insert single pfn into user vma
2526  * @vma: user vma to map to
2527  * @addr: target user address of this page
2528  * @pfn: source kernel pfn
2529  *
2530  * Similar to vm_insert_page, this allows drivers to insert individual pages
2531  * they've allocated into a user vma. Same comments apply.
2532  *
2533  * This function should only be called from a vm_ops->fault handler, and
2534  * in that case the handler should return the result of this function.
2535  *
2536  * vma cannot be a COW mapping.
2537  *
2538  * As this is called only for pages that do not currently exist, we
2539  * do not need to flush old virtual caches or the TLB.
2540  *
2541  * Context: Process context.  May allocate using %GFP_KERNEL.
2542  * Return: vm_fault_t value.
2543  */
2544 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2545 			unsigned long pfn)
2546 {
2547 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2548 }
2549 EXPORT_SYMBOL(vmf_insert_pfn);
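/*
 * Editor's illustrative sketch (not part of memory.c): a hypothetical
 * ->fault handler for a VM_PFNMAP vma backed by a device aperture, returning
 * the result of vmf_insert_pfn() directly as the documentation above asks.
 * vmf_insert_pfn_prot() would be used instead if a per-page pgprot (e.g.
 * write-combining) had to be applied. "my_dev" and my_dev_fault are
 * assumptions for the example.
 */
struct my_dev {
	unsigned long base_pfn;		/* first PFN of the device aperture */
	unsigned long nr_pfns;
};

static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;

	if (vmf->pgoff >= dev->nr_pfns)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vmf->vma, vmf->address, dev->base_pfn + vmf->pgoff);
}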
2550 
2551 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
2552 {
2553 	if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
2554 	    (mkwrite || !vm_mixed_zeropage_allowed(vma)))
2555 		return false;
2556 	/* these checks mirror the abort conditions in vm_normal_page */
2557 	if (vma->vm_flags & VM_MIXEDMAP)
2558 		return true;
2559 	if (pfn_t_devmap(pfn))
2560 		return true;
2561 	if (pfn_t_special(pfn))
2562 		return true;
2563 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2564 		return true;
2565 	return false;
2566 }
2567 
2568 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2569 		unsigned long addr, pfn_t pfn, bool mkwrite)
2570 {
2571 	pgprot_t pgprot = vma->vm_page_prot;
2572 	int err;
2573 
2574 	if (!vm_mixed_ok(vma, pfn, mkwrite))
2575 		return VM_FAULT_SIGBUS;
2576 
2577 	if (addr < vma->vm_start || addr >= vma->vm_end)
2578 		return VM_FAULT_SIGBUS;
2579 
2580 	track_pfn_insert(vma, &pgprot, pfn);
2581 
2582 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2583 		return VM_FAULT_SIGBUS;
2584 
2585 	/*
2586 	 * If we don't have pte special, then we have to use the pfn_valid()
2587 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2588 	 * refcount the page if pfn_valid is true (hence insert_page rather
2589 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2590 	 * without pte special, it would there be refcounted as a normal page.
2591 	 */
2592 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2593 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2594 		struct page *page;
2595 
2596 		/*
2597 		 * At this point we are committed to insert_page()
2598 		 * regardless of whether the caller specified flags that
2599 		 * result in pfn_t_has_page() == false.
2600 		 */
2601 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2602 		err = insert_page(vma, addr, page, pgprot);
2603 	} else {
2604 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2605 	}
2606 
2607 	if (err == -ENOMEM)
2608 		return VM_FAULT_OOM;
2609 	if (err < 0 && err != -EBUSY)
2610 		return VM_FAULT_SIGBUS;
2611 
2612 	return VM_FAULT_NOPAGE;
2613 }
2614 
2615 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2616 		pfn_t pfn)
2617 {
2618 	return __vm_insert_mixed(vma, addr, pfn, false);
2619 }
2620 EXPORT_SYMBOL(vmf_insert_mixed);
2621 
2622 /*
2623  *  If the insertion of PTE failed because someone else already added a
2624  *  different entry in the meantime, we treat that as success as we assume
2625  *  the same entry was actually inserted.
2626  */
2627 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2628 		unsigned long addr, pfn_t pfn)
2629 {
2630 	return __vm_insert_mixed(vma, addr, pfn, true);
2631 }
2632 
2633 /*
2634  * Maps a range of physical memory into the requested pages. The old
2635  * mappings are removed. Any references to nonexistent pages result
2636  * in null mappings (currently treated as "copy-on-access").
2637  */
2638 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2639 			unsigned long addr, unsigned long end,
2640 			unsigned long pfn, pgprot_t prot)
2641 {
2642 	pte_t *pte, *mapped_pte;
2643 	spinlock_t *ptl;
2644 	int err = 0;
2645 
2646 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2647 	if (!pte)
2648 		return -ENOMEM;
2649 	arch_enter_lazy_mmu_mode();
2650 	do {
2651 		BUG_ON(!pte_none(ptep_get(pte)));
2652 		if (!pfn_modify_allowed(pfn, prot)) {
2653 			err = -EACCES;
2654 			break;
2655 		}
2656 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2657 		pfn++;
2658 	} while (pte++, addr += PAGE_SIZE, addr != end);
2659 	arch_leave_lazy_mmu_mode();
2660 	pte_unmap_unlock(mapped_pte, ptl);
2661 	return err;
2662 }
2663 
2664 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2665 			unsigned long addr, unsigned long end,
2666 			unsigned long pfn, pgprot_t prot)
2667 {
2668 	pmd_t *pmd;
2669 	unsigned long next;
2670 	int err;
2671 
2672 	pfn -= addr >> PAGE_SHIFT;
2673 	pmd = pmd_alloc(mm, pud, addr);
2674 	if (!pmd)
2675 		return -ENOMEM;
2676 	VM_BUG_ON(pmd_trans_huge(*pmd));
2677 	do {
2678 		next = pmd_addr_end(addr, end);
2679 		err = remap_pte_range(mm, pmd, addr, next,
2680 				pfn + (addr >> PAGE_SHIFT), prot);
2681 		if (err)
2682 			return err;
2683 	} while (pmd++, addr = next, addr != end);
2684 	return 0;
2685 }
2686 
2687 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2688 			unsigned long addr, unsigned long end,
2689 			unsigned long pfn, pgprot_t prot)
2690 {
2691 	pud_t *pud;
2692 	unsigned long next;
2693 	int err;
2694 
2695 	pfn -= addr >> PAGE_SHIFT;
2696 	pud = pud_alloc(mm, p4d, addr);
2697 	if (!pud)
2698 		return -ENOMEM;
2699 	do {
2700 		next = pud_addr_end(addr, end);
2701 		err = remap_pmd_range(mm, pud, addr, next,
2702 				pfn + (addr >> PAGE_SHIFT), prot);
2703 		if (err)
2704 			return err;
2705 	} while (pud++, addr = next, addr != end);
2706 	return 0;
2707 }
2708 
2709 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2710 			unsigned long addr, unsigned long end,
2711 			unsigned long pfn, pgprot_t prot)
2712 {
2713 	p4d_t *p4d;
2714 	unsigned long next;
2715 	int err;
2716 
2717 	pfn -= addr >> PAGE_SHIFT;
2718 	p4d = p4d_alloc(mm, pgd, addr);
2719 	if (!p4d)
2720 		return -ENOMEM;
2721 	do {
2722 		next = p4d_addr_end(addr, end);
2723 		err = remap_pud_range(mm, p4d, addr, next,
2724 				pfn + (addr >> PAGE_SHIFT), prot);
2725 		if (err)
2726 			return err;
2727 	} while (p4d++, addr = next, addr != end);
2728 	return 0;
2729 }
2730 
2731 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
2732 		unsigned long pfn, unsigned long size, pgprot_t prot)
2733 {
2734 	pgd_t *pgd;
2735 	unsigned long next;
2736 	unsigned long end = addr + PAGE_ALIGN(size);
2737 	struct mm_struct *mm = vma->vm_mm;
2738 	int err;
2739 
2740 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2741 		return -EINVAL;
2742 
2743 	/*
2744 	 * Physically remapped pages are special. Tell the
2745 	 * rest of the world about it:
2746 	 *   VM_IO tells people not to look at these pages
2747 	 *	(accesses can have side effects).
2748 	 *   VM_PFNMAP tells the core MM that the base pages are just
2749 	 *	raw PFN mappings, and do not have a "struct page" associated
2750 	 *	with them.
2751 	 *   VM_DONTEXPAND
2752 	 *      Disable vma merging and expanding with mremap().
2753 	 *   VM_DONTDUMP
2754 	 *      Omit vma from core dump, even when VM_IO turned off.
2755 	 *
2756 	 * There's a horrible special case to handle copy-on-write
2757 	 * behaviour that some programs depend on. We mark the "original"
2758 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2759 	 * See vm_normal_page() for details.
2760 	 */
2761 	if (is_cow_mapping(vma->vm_flags)) {
2762 		if (addr != vma->vm_start || end != vma->vm_end)
2763 			return -EINVAL;
2764 		vma->vm_pgoff = pfn;
2765 	}
2766 
2767 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
2768 
2769 	BUG_ON(addr >= end);
2770 	pfn -= addr >> PAGE_SHIFT;
2771 	pgd = pgd_offset(mm, addr);
2772 	flush_cache_range(vma, addr, end);
2773 	do {
2774 		next = pgd_addr_end(addr, end);
2775 		err = remap_p4d_range(mm, pgd, addr, next,
2776 				pfn + (addr >> PAGE_SHIFT), prot);
2777 		if (err)
2778 			return err;
2779 	} while (pgd++, addr = next, addr != end);
2780 
2781 	return 0;
2782 }
2783 
2784 /*
2785  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2786  * must have pre-validated the caching bits of the pgprot_t.
2787  */
2788 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2789 		unsigned long pfn, unsigned long size, pgprot_t prot)
2790 {
2791 	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
2792 
2793 	if (!error)
2794 		return 0;
2795 
2796 	/*
2797 	 * A partial pfn range mapping is dangerous: it does not
2798 	 * maintain page reference counts, and callers may free
2799 	 * pages due to the error. So zap it early.
2800 	 */
2801 	zap_page_range_single(vma, addr, size, NULL);
2802 	return error;
2803 }
2804 
2805 /**
2806  * remap_pfn_range - remap kernel memory to userspace
2807  * @vma: user vma to map to
2808  * @addr: target page aligned user address to start at
2809  * @pfn: page frame number of kernel physical memory address
2810  * @size: size of mapping area
2811  * @prot: page protection flags for this mapping
2812  *
2813  * Note: this is only safe if the mm semaphore is held when called.
2814  *
2815  * Return: %0 on success, negative error code otherwise.
2816  */
2817 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2818 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2819 {
2820 	int err;
2821 
2822 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2823 	if (err)
2824 		return -EINVAL;
2825 
2826 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2827 	if (err)
2828 		untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
2829 	return err;
2830 }
2831 EXPORT_SYMBOL(remap_pfn_range);
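/*
 * Editor's illustrative sketch (not part of memory.c): the typical
 * remap_pfn_range() call from a hypothetical driver ->mmap handler that maps
 * an MMIO region up front. my_mmio_mmap and MY_MMIO_PHYS are assumptions for
 * the example; pgprot_noncached() is commonly applied for register ranges.
 */
#define MY_MMIO_PHYS	0xfd000000UL		/* hypothetical bus address */

static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Called with mmap_lock held; the whole range is mapped at once. */
	return remap_pfn_range(vma, vma->vm_start,
			       (MY_MMIO_PHYS >> PAGE_SHIFT) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}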
2832 
2833 /**
2834  * vm_iomap_memory - remap memory to userspace
2835  * @vma: user vma to map to
2836  * @start: start of the physical memory to be mapped
2837  * @len: size of area
2838  *
2839  * This is a simplified io_remap_pfn_range() for common driver use. The
2840  * driver just needs to give us the physical memory range to be mapped,
2841  * we'll figure out the rest from the vma information.
2842  *
2843  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2844  * whatever write-combining details or similar.
2845  *
2846  * Return: %0 on success, negative error code otherwise.
2847  */
2848 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2849 {
2850 	unsigned long vm_len, pfn, pages;
2851 
2852 	/* Check that the physical memory area passed in looks valid */
2853 	if (start + len < start)
2854 		return -EINVAL;
2855 	/*
2856 	 * You *really* shouldn't map things that aren't page-aligned,
2857 	 * but we've historically allowed it because IO memory might
2858 	 * just have smaller alignment.
2859 	 */
2860 	len += start & ~PAGE_MASK;
2861 	pfn = start >> PAGE_SHIFT;
2862 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2863 	if (pfn + pages < pfn)
2864 		return -EINVAL;
2865 
2866 	/* We start the mapping 'vm_pgoff' pages into the area */
2867 	if (vma->vm_pgoff > pages)
2868 		return -EINVAL;
2869 	pfn += vma->vm_pgoff;
2870 	pages -= vma->vm_pgoff;
2871 
2872 	/* Can we fit all of the mapping? */
2873 	vm_len = vma->vm_end - vma->vm_start;
2874 	if (vm_len >> PAGE_SHIFT > pages)
2875 		return -EINVAL;
2876 
2877 	/* Ok, let it rip */
2878 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2879 }
2880 EXPORT_SYMBOL(vm_iomap_memory);
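/*
 * Editor's illustrative sketch (not part of memory.c): vm_iomap_memory()
 * reduces the remap_pfn_range() pattern to a single call when the driver only
 * knows the physical extent of its aperture; alignment, vm_pgoff and length
 * checks are done internally. "my_fb" and my_fb_mmap are assumptions for the
 * example.
 */
struct my_fb {
	phys_addr_t phys_base;
	unsigned long size;
};

static int my_fb_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_fb *fb = file->private_data;

	return vm_iomap_memory(vma, fb->phys_base, fb->size);
}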
2881 
2882 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2883 				     unsigned long addr, unsigned long end,
2884 				     pte_fn_t fn, void *data, bool create,
2885 				     pgtbl_mod_mask *mask)
2886 {
2887 	pte_t *pte, *mapped_pte;
2888 	int err = 0;
2889 	spinlock_t *ptl;
2890 
2891 	if (create) {
2892 		mapped_pte = pte = (mm == &init_mm) ?
2893 			pte_alloc_kernel_track(pmd, addr, mask) :
2894 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2895 		if (!pte)
2896 			return -ENOMEM;
2897 	} else {
2898 		mapped_pte = pte = (mm == &init_mm) ?
2899 			pte_offset_kernel(pmd, addr) :
2900 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2901 		if (!pte)
2902 			return -EINVAL;
2903 	}
2904 
2905 	arch_enter_lazy_mmu_mode();
2906 
2907 	if (fn) {
2908 		do {
2909 			if (create || !pte_none(ptep_get(pte))) {
2910 				err = fn(pte++, addr, data);
2911 				if (err)
2912 					break;
2913 			}
2914 		} while (addr += PAGE_SIZE, addr != end);
2915 	}
2916 	*mask |= PGTBL_PTE_MODIFIED;
2917 
2918 	arch_leave_lazy_mmu_mode();
2919 
2920 	if (mm != &init_mm)
2921 		pte_unmap_unlock(mapped_pte, ptl);
2922 	return err;
2923 }
2924 
2925 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2926 				     unsigned long addr, unsigned long end,
2927 				     pte_fn_t fn, void *data, bool create,
2928 				     pgtbl_mod_mask *mask)
2929 {
2930 	pmd_t *pmd;
2931 	unsigned long next;
2932 	int err = 0;
2933 
2934 	BUG_ON(pud_leaf(*pud));
2935 
2936 	if (create) {
2937 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2938 		if (!pmd)
2939 			return -ENOMEM;
2940 	} else {
2941 		pmd = pmd_offset(pud, addr);
2942 	}
2943 	do {
2944 		next = pmd_addr_end(addr, end);
2945 		if (pmd_none(*pmd) && !create)
2946 			continue;
2947 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2948 			return -EINVAL;
2949 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2950 			if (!create)
2951 				continue;
2952 			pmd_clear_bad(pmd);
2953 		}
2954 		err = apply_to_pte_range(mm, pmd, addr, next,
2955 					 fn, data, create, mask);
2956 		if (err)
2957 			break;
2958 	} while (pmd++, addr = next, addr != end);
2959 
2960 	return err;
2961 }
2962 
2963 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2964 				     unsigned long addr, unsigned long end,
2965 				     pte_fn_t fn, void *data, bool create,
2966 				     pgtbl_mod_mask *mask)
2967 {
2968 	pud_t *pud;
2969 	unsigned long next;
2970 	int err = 0;
2971 
2972 	if (create) {
2973 		pud = pud_alloc_track(mm, p4d, addr, mask);
2974 		if (!pud)
2975 			return -ENOMEM;
2976 	} else {
2977 		pud = pud_offset(p4d, addr);
2978 	}
2979 	do {
2980 		next = pud_addr_end(addr, end);
2981 		if (pud_none(*pud) && !create)
2982 			continue;
2983 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2984 			return -EINVAL;
2985 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2986 			if (!create)
2987 				continue;
2988 			pud_clear_bad(pud);
2989 		}
2990 		err = apply_to_pmd_range(mm, pud, addr, next,
2991 					 fn, data, create, mask);
2992 		if (err)
2993 			break;
2994 	} while (pud++, addr = next, addr != end);
2995 
2996 	return err;
2997 }
2998 
2999 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
3000 				     unsigned long addr, unsigned long end,
3001 				     pte_fn_t fn, void *data, bool create,
3002 				     pgtbl_mod_mask *mask)
3003 {
3004 	p4d_t *p4d;
3005 	unsigned long next;
3006 	int err = 0;
3007 
3008 	if (create) {
3009 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
3010 		if (!p4d)
3011 			return -ENOMEM;
3012 	} else {
3013 		p4d = p4d_offset(pgd, addr);
3014 	}
3015 	do {
3016 		next = p4d_addr_end(addr, end);
3017 		if (p4d_none(*p4d) && !create)
3018 			continue;
3019 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
3020 			return -EINVAL;
3021 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
3022 			if (!create)
3023 				continue;
3024 			p4d_clear_bad(p4d);
3025 		}
3026 		err = apply_to_pud_range(mm, p4d, addr, next,
3027 					 fn, data, create, mask);
3028 		if (err)
3029 			break;
3030 	} while (p4d++, addr = next, addr != end);
3031 
3032 	return err;
3033 }
3034 
3035 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3036 				 unsigned long size, pte_fn_t fn,
3037 				 void *data, bool create)
3038 {
3039 	pgd_t *pgd;
3040 	unsigned long start = addr, next;
3041 	unsigned long end = addr + size;
3042 	pgtbl_mod_mask mask = 0;
3043 	int err = 0;
3044 
3045 	if (WARN_ON(addr >= end))
3046 		return -EINVAL;
3047 
3048 	pgd = pgd_offset(mm, addr);
3049 	do {
3050 		next = pgd_addr_end(addr, end);
3051 		if (pgd_none(*pgd) && !create)
3052 			continue;
3053 		if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
3054 			err = -EINVAL;
3055 			break;
3056 		}
3057 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
3058 			if (!create)
3059 				continue;
3060 			pgd_clear_bad(pgd);
3061 		}
3062 		err = apply_to_p4d_range(mm, pgd, addr, next,
3063 					 fn, data, create, &mask);
3064 		if (err)
3065 			break;
3066 	} while (pgd++, addr = next, addr != end);
3067 
3068 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
3069 		arch_sync_kernel_mappings(start, start + size);
3070 
3071 	return err;
3072 }
3073 
3074 /*
3075  * Scan a region of virtual memory, filling in page tables as necessary
3076  * and calling a provided function on each leaf page table.
3077  */
3078 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3079 			unsigned long size, pte_fn_t fn, void *data)
3080 {
3081 	return __apply_to_page_range(mm, addr, size, fn, data, true);
3082 }
3083 EXPORT_SYMBOL_GPL(apply_to_page_range);
3084 
3085 /*
3086  * Scan a region of virtual memory, calling a provided function on
3087  * each leaf page table where it exists.
3088  *
3089  * Unlike apply_to_page_range, this does _not_ fill in page tables
3090  * where they are absent.
3091  */
3092 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
3093 				 unsigned long size, pte_fn_t fn, void *data)
3094 {
3095 	return __apply_to_page_range(mm, addr, size, fn, data, false);
3096 }
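/*
 * Editor's illustrative sketch (not part of memory.c): a minimal pte_fn_t
 * callback that counts populated kernel PTEs in a range. It uses
 * apply_to_existing_page_range(), which never allocates page tables;
 * apply_to_page_range() would be chosen instead if missing tables should be
 * filled in. my_count_pte and my_count_mapped are assumptions for the example.
 */
static int my_count_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *nr = data;

	if (!pte_none(ptep_get(pte)))
		(*nr)++;
	return 0;
}

static unsigned long my_count_mapped(unsigned long start, unsigned long size)
{
	unsigned long nr = 0;

	apply_to_existing_page_range(&init_mm, start, size, my_count_pte, &nr);
	return nr;
}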
3097 
3098 /*
3099  * handle_pte_fault chooses page fault handler according to an entry which was
3100  * read non-atomically.  Before making any commitment, on those architectures
3101  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3102  * parts, do_swap_page must check under lock before unmapping the pte and
3103  * proceeding (but do_wp_page is only called after already making such a check;
3104  * and do_anonymous_page can safely check later on).
3105  */
3106 static inline int pte_unmap_same(struct vm_fault *vmf)
3107 {
3108 	int same = 1;
3109 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
3110 	if (sizeof(pte_t) > sizeof(unsigned long)) {
3111 		spin_lock(vmf->ptl);
3112 		same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
3113 		spin_unlock(vmf->ptl);
3114 	}
3115 #endif
3116 	pte_unmap(vmf->pte);
3117 	vmf->pte = NULL;
3118 	return same;
3119 }
3120 
3121 /*
3122  * Return:
3123  *	0:		copy succeeded
3124  *	-EHWPOISON:	copy failed due to hwpoison in source page
3125  *	-EAGAIN:	copy failed (some other reason)
3126  */
3127 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
3128 				      struct vm_fault *vmf)
3129 {
3130 	int ret;
3131 	void *kaddr;
3132 	void __user *uaddr;
3133 	struct vm_area_struct *vma = vmf->vma;
3134 	struct mm_struct *mm = vma->vm_mm;
3135 	unsigned long addr = vmf->address;
3136 
3137 	if (likely(src)) {
3138 		if (copy_mc_user_highpage(dst, src, addr, vma))
3139 			return -EHWPOISON;
3140 		return 0;
3141 	}
3142 
3143 	/*
3144 	 * If the source page was a PFN mapping, we don't have
3145 	 * a "struct page" for it. We do a best-effort copy by
3146 	 * just copying from the original user address. If that
3147 	 * fails, we just zero-fill it. Live with it.
3148 	 */
3149 	kaddr = kmap_local_page(dst);
3150 	pagefault_disable();
3151 	uaddr = (void __user *)(addr & PAGE_MASK);
3152 
3153 	/*
3154 	 * On architectures with software "accessed" bits, we would
3155 	 * take a double page fault, so mark it accessed here.
3156 	 */
3157 	vmf->pte = NULL;
3158 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
3159 		pte_t entry;
3160 
3161 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3162 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3163 			/*
3164 			 * Another thread has already handled the fault;
3165 			 * just update the local TLB.
3166 			 */
3167 			if (vmf->pte)
3168 				update_mmu_tlb(vma, addr, vmf->pte);
3169 			ret = -EAGAIN;
3170 			goto pte_unlock;
3171 		}
3172 
3173 		entry = pte_mkyoung(vmf->orig_pte);
3174 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3175 			update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3176 	}
3177 
3178 	/*
3179 	 * This really shouldn't fail, because the page is there
3180 	 * in the page tables. But it might just be unreadable,
3181 	 * in which case we just give up and fill the result with
3182 	 * zeroes.
3183 	 */
3184 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3185 		if (vmf->pte)
3186 			goto warn;
3187 
3188 		/* Re-validate under PTL if the page is still mapped */
3189 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3190 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3191 			/* The PTE changed under us, update local tlb */
3192 			if (vmf->pte)
3193 				update_mmu_tlb(vma, addr, vmf->pte);
3194 			ret = -EAGAIN;
3195 			goto pte_unlock;
3196 		}
3197 
3198 		/*
3199 		 * The same page may have been mapped back in since the last
3200 		 * copy attempt. Try to copy again under the PTL.
3201 		 */
3202 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3203 			/*
3204 			 * Warn in case there is some obscure
3205 			 * use-case.
3206 			 */
3207 warn:
3208 			WARN_ON_ONCE(1);
3209 			clear_page(kaddr);
3210 		}
3211 	}
3212 
3213 	ret = 0;
3214 
3215 pte_unlock:
3216 	if (vmf->pte)
3217 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3218 	pagefault_enable();
3219 	kunmap_local(kaddr);
3220 	flush_dcache_page(dst);
3221 
3222 	return ret;
3223 }
3224 
3225 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3226 {
3227 	struct file *vm_file = vma->vm_file;
3228 
3229 	if (vm_file)
3230 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3231 
3232 	/*
3233 	 * Special mappings (e.g. VDSO) do not have any file so fake
3234 	 * a default GFP_KERNEL for them.
3235 	 */
3236 	return GFP_KERNEL;
3237 }
3238 
3239 /*
3240  * Notify the address space that the page is about to become writable so that
3241  * it can prohibit this or wait for the page to get into an appropriate state.
3242  *
3243  * We do this without the lock held, so that it can sleep if it needs to.
3244  */
3245 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3246 {
3247 	vm_fault_t ret;
3248 	unsigned int old_flags = vmf->flags;
3249 
3250 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3251 
3252 	if (vmf->vma->vm_file &&
3253 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3254 		return VM_FAULT_SIGBUS;
3255 
3256 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3257 	/* Restore original flags so that caller is not surprised */
3258 	vmf->flags = old_flags;
3259 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3260 		return ret;
3261 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3262 		folio_lock(folio);
3263 		if (!folio->mapping) {
3264 			folio_unlock(folio);
3265 			return 0; /* retry */
3266 		}
3267 		ret |= VM_FAULT_LOCKED;
3268 	} else
3269 		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3270 	return ret;
3271 }
3272 
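/*
 * Minimal sketch of a ->page_mkwrite() handler that satisfies the contract
 * checked by do_page_mkwrite() above (hedged: filesystem-specific locking and
 * space reservation are omitted, and my_fault_mkwrite is a hypothetical name,
 * not an in-tree implementation):
 *
 *	static vm_fault_t my_fault_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *
 *		folio_lock(folio);
 *		if (folio->mapping != vmf->vma->vm_file->f_mapping) {
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		folio_wait_stable(folio);
 *		return VM_FAULT_LOCKED;
 *	}
 */
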
3273 /*
3274  * Handle dirtying of a page in shared file mapping on a write fault.
3275  *
3276  * The function expects the page to be locked and unlocks it.
3277  */
3278 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3279 {
3280 	struct vm_area_struct *vma = vmf->vma;
3281 	struct address_space *mapping;
3282 	struct folio *folio = page_folio(vmf->page);
3283 	bool dirtied;
3284 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3285 
3286 	dirtied = folio_mark_dirty(folio);
3287 	VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3288 	/*
3289 	 * Take a local copy of the address_space - folio.mapping may be zeroed
3290 	 * by truncate after folio_unlock().   The address_space itself remains
3291 	 * pinned by vma->vm_file's reference.  We rely on folio_unlock()'s
3292 	 * release semantics to prevent the compiler from undoing this copying.
3293 	 */
3294 	mapping = folio_raw_mapping(folio);
3295 	folio_unlock(folio);
3296 
3297 	if (!page_mkwrite)
3298 		file_update_time(vma->vm_file);
3299 
3300 	/*
3301 	 * Throttle page dirtying rate down to writeback speed.
3302 	 *
3303 	 * mapping may be NULL here because some device drivers do not
3304 	 * set page.mapping but still dirty their pages.
3305 	 *
3306 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3307 	 * is pinning the mapping, as per above.
3308 	 */
3309 	if ((dirtied || page_mkwrite) && mapping) {
3310 		struct file *fpin;
3311 
3312 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3313 		balance_dirty_pages_ratelimited(mapping);
3314 		if (fpin) {
3315 			fput(fpin);
3316 			return VM_FAULT_COMPLETED;
3317 		}
3318 	}
3319 
3320 	return 0;
3321 }
3322 
3323 /*
3324  * Handle write page faults for pages that can be reused in the current vma
3325  *
3326  * This can happen either due to the mapping being with the VM_SHARED flag,
3327  * This can happen either because the mapping has the VM_SHARED flag set,
3328  * or because we hold the last remaining reference to the page. In either
3329  * any related book-keeping.
3330  */
3331 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3332 	__releases(vmf->ptl)
3333 {
3334 	struct vm_area_struct *vma = vmf->vma;
3335 	pte_t entry;
3336 
3337 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3338 	VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
3339 
3340 	if (folio) {
3341 		VM_BUG_ON(folio_test_anon(folio) &&
3342 			  !PageAnonExclusive(vmf->page));
3343 		/*
3344 		 * Clear the folio's cpupid information as the existing
3345 		 * information potentially belongs to a now completely
3346 		 * unrelated process.
3347 		 */
3348 		folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3349 	}
3350 
3351 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3352 	entry = pte_mkyoung(vmf->orig_pte);
3353 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3354 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3355 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3356 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3357 	count_vm_event(PGREUSE);
3358 }
3359 
3360 /*
3361  * We could add a bitflag somewhere, but for now, we know that all
3362  * vm_ops that have a ->map_pages have been audited and don't need
3363  * the mmap_lock to be held.
3364  */
3365 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3366 {
3367 	struct vm_area_struct *vma = vmf->vma;
3368 
3369 	if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3370 		return 0;
3371 	vma_end_read(vma);
3372 	return VM_FAULT_RETRY;
3373 }
3374 
3375 /**
3376  * __vmf_anon_prepare - Prepare to handle an anonymous fault.
3377  * @vmf: The vm_fault descriptor passed from the fault handler.
3378  *
3379  * When preparing to insert an anonymous page into a VMA from a
3380  * fault handler, call this function rather than anon_vma_prepare().
3381  * If this vma does not already have an associated anon_vma and we are
3382  * only protected by the per-VMA lock, the caller must retry with the
3383  * mmap_lock held.  __anon_vma_prepare() will look at adjacent VMAs to
3384  * determine if this VMA can share its anon_vma, and that's not safe to
3385  * do with only the per-VMA lock held for this VMA.
3386  *
3387  * Return: 0 if fault handling can proceed.  Any other value should be
3388  * returned to the caller.
3389  */
3390 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
3391 {
3392 	struct vm_area_struct *vma = vmf->vma;
3393 	vm_fault_t ret = 0;
3394 
3395 	if (likely(vma->anon_vma))
3396 		return 0;
3397 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3398 		if (!mmap_read_trylock(vma->vm_mm))
3399 			return VM_FAULT_RETRY;
3400 	}
3401 	if (__anon_vma_prepare(vma))
3402 		ret = VM_FAULT_OOM;
3403 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3404 		mmap_read_unlock(vma->vm_mm);
3405 	return ret;
3406 }
3407 
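/*
 * Hedged usage sketch: a fault handler about to map an anonymous page would
 * typically call the vmf_anon_prepare() wrapper, as wp_page_copy() below does:
 *
 *	ret = vmf_anon_prepare(vmf);
 *	if (unlikely(ret))
 *		return ret;
 */
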
3408 /*
3409  * Handle the case of a page which we actually need to copy to a new page,
3410  * either due to COW or unsharing.
3411  *
3412  * Called with mmap_lock locked and the old page referenced, but
3413  * without the ptl held.
3414  *
3415  * High level logic flow:
3416  *
3417  * - Allocate a page, copy the content of the old page to the new one.
3418  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3419  * - Take the PTL. If the pte changed, bail out and release the allocated page
3420  * - If the pte is still the way we remember it, update the page table and all
3421  *   relevant references. This includes dropping the reference the page-table
3422  *   held to the old page, as well as updating the rmap.
3423  * - In any case, unlock the PTL and drop the reference we took to the old page.
3424  */
3425 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3426 {
3427 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3428 	struct vm_area_struct *vma = vmf->vma;
3429 	struct mm_struct *mm = vma->vm_mm;
3430 	struct folio *old_folio = NULL;
3431 	struct folio *new_folio = NULL;
3432 	pte_t entry;
3433 	int page_copied = 0;
3434 	struct mmu_notifier_range range;
3435 	vm_fault_t ret;
3436 	bool pfn_is_zero;
3437 
3438 	delayacct_wpcopy_start();
3439 
3440 	if (vmf->page)
3441 		old_folio = page_folio(vmf->page);
3442 	ret = vmf_anon_prepare(vmf);
3443 	if (unlikely(ret))
3444 		goto out;
3445 
3446 	pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3447 	new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3448 	if (!new_folio)
3449 		goto oom;
3450 
3451 	if (!pfn_is_zero) {
3452 		int err;
3453 
3454 		err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3455 		if (err) {
3456 			/*
3457 			 * COW failed; if the fault was resolved by another
3458 			 * thread, that's fine. If not, userspace will re-fault on
3459 			 * the same address and we will handle the fault
3460 			 * from the second attempt.
3461 			 * The -EHWPOISON case will not be retried.
3462 			 */
3463 			folio_put(new_folio);
3464 			if (old_folio)
3465 				folio_put(old_folio);
3466 
3467 			delayacct_wpcopy_end();
3468 			return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3469 		}
3470 		kmsan_copy_page_meta(&new_folio->page, vmf->page);
3471 	}
3472 
3473 	__folio_mark_uptodate(new_folio);
3474 
3475 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3476 				vmf->address & PAGE_MASK,
3477 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3478 	mmu_notifier_invalidate_range_start(&range);
3479 
3480 	/*
3481 	 * Re-check the pte - we dropped the lock
3482 	 */
3483 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3484 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3485 		if (old_folio) {
3486 			if (!folio_test_anon(old_folio)) {
3487 				dec_mm_counter(mm, mm_counter_file(old_folio));
3488 				inc_mm_counter(mm, MM_ANONPAGES);
3489 			}
3490 		} else {
3491 			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3492 			inc_mm_counter(mm, MM_ANONPAGES);
3493 		}
3494 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3495 		entry = mk_pte(&new_folio->page, vma->vm_page_prot);
3496 		entry = pte_sw_mkyoung(entry);
3497 		if (unlikely(unshare)) {
3498 			if (pte_soft_dirty(vmf->orig_pte))
3499 				entry = pte_mksoft_dirty(entry);
3500 			if (pte_uffd_wp(vmf->orig_pte))
3501 				entry = pte_mkuffd_wp(entry);
3502 		} else {
3503 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3504 		}
3505 
3506 		/*
3507 		 * Clear the pte entry and flush it first, before updating the
3508 		 * pte with the new entry, to keep TLBs on different CPUs in
3509 		 * sync. This code used to set the new PTE then flush TLBs, but
3510 		 * that left a window where the new PTE could be loaded into
3511 		 * some TLBs while the old PTE remains in others.
3512 		 */
3513 		ptep_clear_flush(vma, vmf->address, vmf->pte);
3514 		folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
3515 		folio_add_lru_vma(new_folio, vma);
3516 		BUG_ON(unshare && pte_write(entry));
3517 		set_pte_at(mm, vmf->address, vmf->pte, entry);
3518 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3519 		if (old_folio) {
3520 			/*
3521 			 * Only after switching the pte to the new page may
3522 			 * we remove the mapcount here. Otherwise another
3523 			 * process may come and find the rmap count decremented
3524 			 * before the pte is switched to the new page, and
3525 			 * "reuse" the old page writing into it while our pte
3526 			 * here still points into it and can be read by other
3527 			 * threads.
3528 			 *
3529 			 * The critical issue is to order this
3530 			 * folio_remove_rmap_pte() with the ptep_clear_flush
3531 			 * above. Those stores are ordered by (if nothing else)
3532 			 * the barrier present in the atomic_add_negative
3533 			 * in folio_remove_rmap_pte();
3534 			 *
3535 			 * Then the TLB flush in ptep_clear_flush ensures that
3536 			 * no process can access the old page before the
3537 			 * decremented mapcount is visible. And the old page
3538 			 * cannot be reused until after the decremented
3539 			 * mapcount is visible. So transitively, TLBs to
3540 			 * old page will be flushed before it can be reused.
3541 			 */
3542 			folio_remove_rmap_pte(old_folio, vmf->page, vma);
3543 		}
3544 
3545 		/* Free the old page.. */
3546 		new_folio = old_folio;
3547 		page_copied = 1;
3548 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3549 	} else if (vmf->pte) {
3550 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3551 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3552 	}
3553 
3554 	mmu_notifier_invalidate_range_end(&range);
3555 
3556 	if (new_folio)
3557 		folio_put(new_folio);
3558 	if (old_folio) {
3559 		if (page_copied)
3560 			free_swap_cache(old_folio);
3561 		folio_put(old_folio);
3562 	}
3563 
3564 	delayacct_wpcopy_end();
3565 	return 0;
3566 oom:
3567 	ret = VM_FAULT_OOM;
3568 out:
3569 	if (old_folio)
3570 		folio_put(old_folio);
3571 
3572 	delayacct_wpcopy_end();
3573 	return ret;
3574 }
3575 
3576 /**
3577  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3578  *			  writeable once the page is prepared
3579  *
3580  * @vmf: structure describing the fault
3581  * @folio: the folio of vmf->page
3582  *
3583  * This function handles all that is needed to finish a write page fault in a
3584  * shared mapping due to PTE being read-only once the mapped page is prepared.
3585  * It handles locking of PTE and modifying it.
3586  *
3587  * The function expects the page to be locked or other protection against
3588  * concurrent faults / writeback (such as DAX radix tree locks).
3589  *
3590  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3591  * we acquired PTE lock.
3592  */
3593 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
3594 {
3595 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3596 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3597 				       &vmf->ptl);
3598 	if (!vmf->pte)
3599 		return VM_FAULT_NOPAGE;
3600 	/*
3601 	 * We might have raced with another page fault while we released the
3602 	 * pte_offset_map_lock.
3603 	 */
3604 	if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3605 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3606 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3607 		return VM_FAULT_NOPAGE;
3608 	}
3609 	wp_page_reuse(vmf, folio);
3610 	return 0;
3611 }
3612 
3613 /*
3614  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3615  * mapping
3616  */
3617 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3618 {
3619 	struct vm_area_struct *vma = vmf->vma;
3620 
3621 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3622 		vm_fault_t ret;
3623 
3624 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3625 		ret = vmf_can_call_fault(vmf);
3626 		if (ret)
3627 			return ret;
3628 
3629 		vmf->flags |= FAULT_FLAG_MKWRITE;
3630 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3631 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3632 			return ret;
3633 		return finish_mkwrite_fault(vmf, NULL);
3634 	}
3635 	wp_page_reuse(vmf, NULL);
3636 	return 0;
3637 }
3638 
3639 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3640 	__releases(vmf->ptl)
3641 {
3642 	struct vm_area_struct *vma = vmf->vma;
3643 	vm_fault_t ret = 0;
3644 
3645 	folio_get(folio);
3646 
3647 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3648 		vm_fault_t tmp;
3649 
3650 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3651 		tmp = vmf_can_call_fault(vmf);
3652 		if (tmp) {
3653 			folio_put(folio);
3654 			return tmp;
3655 		}
3656 
3657 		tmp = do_page_mkwrite(vmf, folio);
3658 		if (unlikely(!tmp || (tmp &
3659 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3660 			folio_put(folio);
3661 			return tmp;
3662 		}
3663 		tmp = finish_mkwrite_fault(vmf, folio);
3664 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3665 			folio_unlock(folio);
3666 			folio_put(folio);
3667 			return tmp;
3668 		}
3669 	} else {
3670 		wp_page_reuse(vmf, folio);
3671 		folio_lock(folio);
3672 	}
3673 	ret |= fault_dirty_shared_page(vmf);
3674 	folio_put(folio);
3675 
3676 	return ret;
3677 }
3678 
3679 static bool wp_can_reuse_anon_folio(struct folio *folio,
3680 				    struct vm_area_struct *vma)
3681 {
3682 	/*
3683 	 * We could currently only reuse a subpage of a large folio if no
3684 	 * other subpages of the large folio are still mapped. However,
3685 	 * let's just consistently not reuse subpages even if we could
3686 	 * reuse in that scenario, and give back a large folio a bit
3687 	 * sooner.
3688 	 */
3689 	if (folio_test_large(folio))
3690 		return false;
3691 
3692 	/*
3693 	 * We have to verify under folio lock: these early checks are
3694 	 * just an optimization to avoid locking the folio and freeing
3695 	 * the swapcache if there is little hope that we can reuse.
3696 	 *
3697 	 * KSM doesn't necessarily raise the folio refcount.
3698 	 */
3699 	if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3700 		return false;
3701 	if (!folio_test_lru(folio))
3702 		/*
3703 		 * We cannot easily detect+handle references from
3704 		 * remote LRU caches or references to LRU folios.
3705 		 */
3706 		lru_add_drain();
3707 	if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3708 		return false;
3709 	if (!folio_trylock(folio))
3710 		return false;
3711 	if (folio_test_swapcache(folio))
3712 		folio_free_swap(folio);
3713 	if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3714 		folio_unlock(folio);
3715 		return false;
3716 	}
3717 	/*
3718 	 * Ok, we've got the only folio reference from our mapping
3719 	 * and the folio is locked, it's dark out, and we're wearing
3720 	 * sunglasses. Hit it.
3721 	 */
3722 	folio_move_anon_rmap(folio, vma);
3723 	folio_unlock(folio);
3724 	return true;
3725 }
3726 
3727 /*
3728  * This routine handles present pages, when
3729  * * users try to write to a shared page (FAULT_FLAG_WRITE)
3730  * * GUP wants to take a R/O pin on a possibly shared anonymous page
3731  *   (FAULT_FLAG_UNSHARE)
3732  *
3733  * It is done by copying the page to a new address and decrementing the
3734  * shared-page counter for the old page.
3735  *
3736  * Note that this routine assumes that the protection checks have been
3737  * done by the caller (the low-level page fault routine in most cases).
3738  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3739  * done any necessary COW.
3740  *
3741  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3742  * though the page will change only once the write actually happens. This
3743  * avoids a few races, and potentially makes it more efficient.
3744  *
3745  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3746  * but allow concurrent faults), with pte both mapped and locked.
3747  * We return with mmap_lock still held, but pte unmapped and unlocked.
3748  */
3749 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3750 	__releases(vmf->ptl)
3751 {
3752 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3753 	struct vm_area_struct *vma = vmf->vma;
3754 	struct folio *folio = NULL;
3755 	pte_t pte;
3756 
3757 	if (likely(!unshare)) {
3758 		if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3759 			if (!userfaultfd_wp_async(vma)) {
3760 				pte_unmap_unlock(vmf->pte, vmf->ptl);
3761 				return handle_userfault(vmf, VM_UFFD_WP);
3762 			}
3763 
3764 			/*
3765 			 * Nothing needed (cache flush, TLB invalidations,
3766 			 * etc.) because we're only removing the uffd-wp bit,
3767 			 * which is completely invisible to the user.
3768 			 */
3769 			pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
3770 
3771 			set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3772 			/*
3773 			 * Update this to be prepared for following up CoW
3774 			 * handling
3775 			 */
3776 			vmf->orig_pte = pte;
3777 		}
3778 
3779 		/*
3780 		 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3781 		 * is flushed in this case before copying.
3782 		 */
3783 		if (unlikely(userfaultfd_wp(vmf->vma) &&
3784 			     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3785 			flush_tlb_page(vmf->vma, vmf->address);
3786 	}
3787 
3788 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3789 
3790 	if (vmf->page)
3791 		folio = page_folio(vmf->page);
3792 
3793 	/*
3794 	 * Shared mapping: we are guaranteed to have VM_WRITE and
3795 	 * FAULT_FLAG_WRITE set at this point.
3796 	 */
3797 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3798 		/*
3799 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3800 		 * VM_PFNMAP VMA.
3801 		 *
3802 		 * We should not cow pages in a shared writeable mapping.
3803 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3804 		 */
3805 		if (!vmf->page)
3806 			return wp_pfn_shared(vmf);
3807 		return wp_page_shared(vmf, folio);
3808 	}
3809 
3810 	/*
3811 	 * Private mapping: create an exclusive anonymous page copy if reuse
3812 	 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
3813 	 *
3814 	 * If we encounter a page that is marked exclusive, we must reuse
3815 	 * the page without further checks.
3816 	 */
3817 	if (folio && folio_test_anon(folio) &&
3818 	    (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
3819 		if (!PageAnonExclusive(vmf->page))
3820 			SetPageAnonExclusive(vmf->page);
3821 		if (unlikely(unshare)) {
3822 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3823 			return 0;
3824 		}
3825 		wp_page_reuse(vmf, folio);
3826 		return 0;
3827 	}
3828 	/*
3829 	 * Ok, we need to copy. Oh, well..
3830 	 */
3831 	if (folio)
3832 		folio_get(folio);
3833 
3834 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3835 #ifdef CONFIG_KSM
3836 	if (folio && folio_test_ksm(folio))
3837 		count_vm_event(COW_KSM);
3838 #endif
3839 	return wp_page_copy(vmf);
3840 }
3841 
3842 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3843 		unsigned long start_addr, unsigned long end_addr,
3844 		struct zap_details *details)
3845 {
3846 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3847 }
3848 
3849 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3850 					    pgoff_t first_index,
3851 					    pgoff_t last_index,
3852 					    struct zap_details *details)
3853 {
3854 	struct vm_area_struct *vma;
3855 	pgoff_t vba, vea, zba, zea;
3856 
3857 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
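		/*
		 * vba/vea: first/last file page this VMA maps;
		 * zba/zea: that range clamped to [first_index, last_index].
		 */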
3858 		vba = vma->vm_pgoff;
3859 		vea = vba + vma_pages(vma) - 1;
3860 		zba = max(first_index, vba);
3861 		zea = min(last_index, vea);
3862 
3863 		unmap_mapping_range_vma(vma,
3864 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3865 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3866 				details);
3867 	}
3868 }
3869 
3870 /**
3871  * unmap_mapping_folio() - Unmap single folio from processes.
3872  * @folio: The locked folio to be unmapped.
3873  *
3874  * Unmap this folio from any userspace process which still has it mmaped.
3875  * Typically, for efficiency, the range of nearby pages has already been
3876  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3877  * truncation or invalidation holds the lock on a folio, it may find that
3878  * the page has been remapped again: and then uses unmap_mapping_folio()
3879  * to unmap it finally.
3880  */
3881 void unmap_mapping_folio(struct folio *folio)
3882 {
3883 	struct address_space *mapping = folio->mapping;
3884 	struct zap_details details = { };
3885 	pgoff_t	first_index;
3886 	pgoff_t	last_index;
3887 
3888 	VM_BUG_ON(!folio_test_locked(folio));
3889 
3890 	first_index = folio->index;
3891 	last_index = folio_next_index(folio) - 1;
3892 
3893 	details.even_cows = false;
3894 	details.single_folio = folio;
3895 	details.zap_flags = ZAP_FLAG_DROP_MARKER;
3896 
3897 	i_mmap_lock_read(mapping);
3898 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3899 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3900 					 last_index, &details);
3901 	i_mmap_unlock_read(mapping);
3902 }
3903 
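/*
 * Hedged usage sketch (mirroring the truncation flow described above): a
 * truncation path that finds a locked folio still mapped might do
 *
 *	if (folio_mapped(folio))
 *		unmap_mapping_folio(folio);
 *
 * before removing it from the page cache.
 */
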
3904 /**
3905  * unmap_mapping_pages() - Unmap pages from processes.
3906  * @mapping: The address space containing pages to be unmapped.
3907  * @start: Index of first page to be unmapped.
3908  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3909  * @even_cows: Whether to unmap even private COWed pages.
3910  *
3911  * Unmap the pages in this address space from any userspace process which
3912  * has them mmaped.  Generally, you want to remove COWed pages as well when
3913  * a file is being truncated, but not when invalidating pages from the page
3914  * cache.
3915  */
3916 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3917 		pgoff_t nr, bool even_cows)
3918 {
3919 	struct zap_details details = { };
3920 	pgoff_t	first_index = start;
3921 	pgoff_t	last_index = start + nr - 1;
3922 
3923 	details.even_cows = even_cows;
3924 	if (last_index < first_index)
3925 		last_index = ULONG_MAX;
3926 
3927 	i_mmap_lock_read(mapping);
3928 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3929 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3930 					 last_index, &details);
3931 	i_mmap_unlock_read(mapping);
3932 }
3933 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
3934 
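/*
 * Hedged usage sketch: invalidating a 16-page range of an inode's pagecache
 * mappings while preserving private COWed copies might look like
 *
 *	unmap_mapping_pages(inode->i_mapping, index, 16, false);
 */
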
3935 /**
3936  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3937  * address_space corresponding to the specified byte range in the underlying
3938  * file.
3939  *
3940  * @mapping: the address space containing mmaps to be unmapped.
3941  * @holebegin: byte in first page to unmap, relative to the start of
3942  * the underlying file.  This will be rounded down to a PAGE_SIZE
3943  * boundary.  Note that this is different from truncate_pagecache(), which
3944  * must keep the partial page.  In contrast, we must get rid of
3945  * partial pages.
3946  * @holelen: size of prospective hole in bytes.  This will be rounded
3947  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3948  * end of the file.
3949  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3950  * but 0 when invalidating pagecache, don't throw away private data.
3951  */
3952 void unmap_mapping_range(struct address_space *mapping,
3953 		loff_t const holebegin, loff_t const holelen, int even_cows)
3954 {
3955 	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3956 	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
3957 
3958 	/* Check for overflow. */
3959 	if (sizeof(holelen) > sizeof(hlen)) {
3960 		long long holeend =
3961 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3962 		if (holeend & ~(long long)ULONG_MAX)
3963 			hlen = ULONG_MAX - hba + 1;
3964 	}
3965 
3966 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3967 }
3968 EXPORT_SYMBOL(unmap_mapping_range);
3969 
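/*
 * Hedged usage sketch: when truncating a file down to 'newsize', a caller
 * would unmap everything from the first page boundary at or above the new
 * size to EOF, including private COWed pages:
 *
 *	unmap_mapping_range(inode->i_mapping,
 *			    round_up(newsize, PAGE_SIZE), 0, 1);
 */
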
3970 /*
3971  * Restore a potential device exclusive pte to a working pte entry
3972  */
3973 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3974 {
3975 	struct folio *folio = page_folio(vmf->page);
3976 	struct vm_area_struct *vma = vmf->vma;
3977 	struct mmu_notifier_range range;
3978 	vm_fault_t ret;
3979 
3980 	/*
3981 	 * We need a reference to lock the folio because we don't hold
3982 	 * the PTL so a racing thread can remove the device-exclusive
3983 	 * entry and unmap it. If the folio is free the entry must
3984 	 * have been removed already. If it happens to have already
3985 	 * been re-allocated after being freed all we do is lock and
3986 	 * unlock it.
3987 	 */
3988 	if (!folio_try_get(folio))
3989 		return 0;
3990 
3991 	ret = folio_lock_or_retry(folio, vmf);
3992 	if (ret) {
3993 		folio_put(folio);
3994 		return ret;
3995 	}
3996 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3997 				vma->vm_mm, vmf->address & PAGE_MASK,
3998 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3999 	mmu_notifier_invalidate_range_start(&range);
4000 
4001 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4002 				&vmf->ptl);
4003 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4004 		restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
4005 
4006 	if (vmf->pte)
4007 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4008 	folio_unlock(folio);
4009 	folio_put(folio);
4010 
4011 	mmu_notifier_invalidate_range_end(&range);
4012 	return 0;
4013 }
4014 
4015 static inline bool should_try_to_free_swap(struct folio *folio,
4016 					   struct vm_area_struct *vma,
4017 					   unsigned int fault_flags)
4018 {
4019 	if (!folio_test_swapcache(folio))
4020 		return false;
4021 	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
4022 	    folio_test_mlocked(folio))
4023 		return true;
4024 	/*
4025 	 * If we want to map a page that's in the swapcache writable, we
4026 	 * have to detect via the refcount if we're really the exclusive
4027 	 * user. Try freeing the swapcache to get rid of the swapcache
4028 	 * reference only in case it's likely that we'll be the exclusive user.
4029 	 */
4030 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
4031 		folio_ref_count(folio) == (1 + folio_nr_pages(folio));
4032 }
4033 
4034 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
4035 {
4036 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4037 				       vmf->address, &vmf->ptl);
4038 	if (!vmf->pte)
4039 		return 0;
4040 	/*
4041 	 * Be careful so that we will only recover a special uffd-wp pte into a
4042 	 * none pte.  Otherwise it means the pte could have changed, so retry.
4043 	 *
4044 	 * This should also cover the case where e.g. the pte changed
4045 	 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
4046 	 * So the is_pte_marker() check is not enough to safely drop the pte.
4047 	 */
4048 	if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
4049 		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
4050 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4051 	return 0;
4052 }
4053 
4054 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
4055 {
4056 	if (vma_is_anonymous(vmf->vma))
4057 		return do_anonymous_page(vmf);
4058 	else
4059 		return do_fault(vmf);
4060 }
4061 
4062 /*
4063  * This is actually a page-missing access, but with uffd-wp special pte
4064  * installed.  It means this pte was wr-protected before being unmapped.
4065  */
4066 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
4067 {
4068 	/*
4069 	 * Just in case there're leftover special ptes even after the region
4070 	 * got unregistered - we can simply clear them.
4071 	 */
4072 	if (unlikely(!userfaultfd_wp(vmf->vma)))
4073 		return pte_marker_clear(vmf);
4074 
4075 	return do_pte_missing(vmf);
4076 }
4077 
4078 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
4079 {
4080 	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
4081 	unsigned long marker = pte_marker_get(entry);
4082 
4083 	/*
4084 	 * PTE markers should never be empty.  If anything weird happened,
4085 	 * the best thing to do is to kill the process along with its mm.
4086 	 */
4087 	if (WARN_ON_ONCE(!marker))
4088 		return VM_FAULT_SIGBUS;
4089 
4090 	/* Higher priority than uffd-wp when data corrupted */
4091 	if (marker & PTE_MARKER_POISONED)
4092 		return VM_FAULT_HWPOISON;
4093 
4094 	/* Hitting a guard page is always a fatal condition. */
4095 	if (marker & PTE_MARKER_GUARD)
4096 		return VM_FAULT_SIGSEGV;
4097 
4098 	if (pte_marker_entry_uffd_wp(entry))
4099 		return pte_marker_handle_uffd_wp(vmf);
4100 
4101 	/* This is an unknown pte marker */
4102 	return VM_FAULT_SIGBUS;
4103 }
4104 
4105 static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
4106 {
4107 	struct vm_area_struct *vma = vmf->vma;
4108 	struct folio *folio;
4109 	swp_entry_t entry;
4110 
4111 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
4112 	if (!folio)
4113 		return NULL;
4114 
4115 	entry = pte_to_swp_entry(vmf->orig_pte);
4116 	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4117 					   GFP_KERNEL, entry)) {
4118 		folio_put(folio);
4119 		return NULL;
4120 	}
4121 
4122 	return folio;
4123 }
4124 
4125 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4126 static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
4127 {
4128 	struct swap_info_struct *si = swp_swap_info(entry);
4129 	pgoff_t offset = swp_offset(entry);
4130 	int i;
4131 
4132 	/*
4133 	 * When allocating a large folio and reading it with swap_read_folio(),
4134 	 * i.e. when the faulting pte has no swapcache, we need to ensure that
4135 	 * none of the other PTEs has swapcache either; otherwise we might read
4136 	 * from the swap device while the content is still in the swapcache.
4137 	 */
4138 	for (i = 0; i < max_nr; i++) {
4139 		if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
4140 			return i;
4141 	}
4142 
4143 	return i;
4144 }
4145 
4146 /*
4147  * Check if the PTEs within a range are contiguous swap entries
4148  * and have consistent swapcache, zeromap.
4149  */
4150 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
4151 {
4152 	unsigned long addr;
4153 	swp_entry_t entry;
4154 	int idx;
4155 	pte_t pte;
4156 
4157 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4158 	idx = (vmf->address - addr) / PAGE_SIZE;
4159 	pte = ptep_get(ptep);
4160 
4161 	if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
4162 		return false;
4163 	entry = pte_to_swp_entry(pte);
4164 	if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
4165 		return false;
4166 
4167 	/*
4168 	 * swap_read_folio() can't handle the case where a large folio is backed
4169 	 * by a mix of different backends, and those are likely corner cases
4170 	 * anyway. Similar checks might be added once zswap supports large folios.
4171 	 */
4172 	if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
4173 		return false;
4174 	if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
4175 		return false;
4176 
4177 	return true;
4178 }
4179 
4180 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
4181 						     unsigned long addr,
4182 						     unsigned long orders)
4183 {
4184 	int order, nr;
4185 
4186 	order = highest_order(orders);
4187 
4188 	/*
4189 	 * To swap in a THP with nr pages, we require that its first swap_offset
4190 	 * is aligned with that number, as it was when the THP was swapped out.
4191 	 * This helps filter out most invalid entries.
4192 	 */
4193 	while (orders) {
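	/*
	 * Worked example (numbers purely illustrative): for an order-2
	 * candidate (nr == 4), a fault at a virtual page where
	 * (addr >> PAGE_SHIFT) % 4 == 1 is only kept if swp_offset % 4 == 1
	 * as well; otherwise that order is dropped and the next lower
	 * enabled order is tried.
	 */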
4194 		nr = 1 << order;
4195 		if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
4196 			break;
4197 		order = next_order(&orders, order);
4198 	}
4199 
4200 	return orders;
4201 }
4202 
4203 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4204 {
4205 	struct vm_area_struct *vma = vmf->vma;
4206 	unsigned long orders;
4207 	struct folio *folio;
4208 	unsigned long addr;
4209 	swp_entry_t entry;
4210 	spinlock_t *ptl;
4211 	pte_t *pte;
4212 	gfp_t gfp;
4213 	int order;
4214 
4215 	/*
4216 	 * If uffd is active for the vma we need per-page fault fidelity to
4217 	 * maintain the uffd semantics.
4218 	 */
4219 	if (unlikely(userfaultfd_armed(vma)))
4220 		goto fallback;
4221 
4222 	/*
4223 	 * A large swapped out folio could be partially or fully in zswap. We
4224 	 * lack handling for such cases, so fallback to swapping in order-0
4225 	 * folio.
4226 	 */
4227 	if (!zswap_never_enabled())
4228 		goto fallback;
4229 
4230 	entry = pte_to_swp_entry(vmf->orig_pte);
4231 	/*
4232 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4233 	 * and suitable for swapping THP.
4234 	 */
4235 	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4236 			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4237 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4238 	orders = thp_swap_suitable_orders(swp_offset(entry),
4239 					  vmf->address, orders);
4240 
4241 	if (!orders)
4242 		goto fallback;
4243 
4244 	pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4245 				  vmf->address & PMD_MASK, &ptl);
4246 	if (unlikely(!pte))
4247 		goto fallback;
4248 
4249 	/*
4250 	 * For do_swap_page, find the highest order where the aligned range is
4251 	 * completely swap entries with contiguous swap offsets.
4252 	 */
4253 	order = highest_order(orders);
4254 	while (orders) {
4255 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4256 		if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
4257 			break;
4258 		order = next_order(&orders, order);
4259 	}
4260 
4261 	pte_unmap_unlock(pte, ptl);
4262 
4263 	/* Try allocating the highest of the remaining orders. */
4264 	gfp = vma_thp_gfp_mask(vma);
4265 	while (orders) {
4266 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4267 		folio = vma_alloc_folio(gfp, order, vma, addr);
4268 		if (folio) {
4269 			if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4270 							    gfp, entry))
4271 				return folio;
4272 			count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
4273 			folio_put(folio);
4274 		}
4275 		count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
4276 		order = next_order(&orders, order);
4277 	}
4278 
4279 fallback:
4280 	return __alloc_swap_folio(vmf);
4281 }
4282 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4283 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4284 {
4285 	return __alloc_swap_folio(vmf);
4286 }
4287 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4288 
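/*
 * Waitqueue for faults that lose the swapcache_prepare() race in the direct
 * (skip-swapcache) swapin path below: losers briefly sleep here and are woken
 * once the winner drops its transient SWAP_HAS_CACHE pin via swapcache_clear().
 */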
4289 static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
4290 
4291 /*
4292  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4293  * but allow concurrent faults), and pte mapped but not yet locked.
4294  * We return with pte unmapped and unlocked.
4295  *
4296  * We return with the mmap_lock locked or unlocked in the same cases
4297  * as does filemap_fault().
4298  */
4299 vm_fault_t do_swap_page(struct vm_fault *vmf)
4300 {
4301 	struct vm_area_struct *vma = vmf->vma;
4302 	struct folio *swapcache, *folio = NULL;
4303 	DECLARE_WAITQUEUE(wait, current);
4304 	struct page *page;
4305 	struct swap_info_struct *si = NULL;
4306 	rmap_t rmap_flags = RMAP_NONE;
4307 	bool need_clear_cache = false;
4308 	bool exclusive = false;
4309 	swp_entry_t entry;
4310 	pte_t pte;
4311 	vm_fault_t ret = 0;
4312 	void *shadow = NULL;
4313 	int nr_pages;
4314 	unsigned long page_idx;
4315 	unsigned long address;
4316 	pte_t *ptep;
4317 
4318 	if (!pte_unmap_same(vmf))
4319 		goto out;
4320 
4321 	entry = pte_to_swp_entry(vmf->orig_pte);
4322 	if (unlikely(non_swap_entry(entry))) {
4323 		if (is_migration_entry(entry)) {
4324 			migration_entry_wait(vma->vm_mm, vmf->pmd,
4325 					     vmf->address);
4326 		} else if (is_device_exclusive_entry(entry)) {
4327 			vmf->page = pfn_swap_entry_to_page(entry);
4328 			ret = remove_device_exclusive_entry(vmf);
4329 		} else if (is_device_private_entry(entry)) {
4330 			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4331 				/*
4332 				 * migrate_to_ram is not yet ready to operate
4333 				 * under VMA lock.
4334 				 */
4335 				vma_end_read(vma);
4336 				ret = VM_FAULT_RETRY;
4337 				goto out;
4338 			}
4339 
4340 			vmf->page = pfn_swap_entry_to_page(entry);
4341 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4342 					vmf->address, &vmf->ptl);
4343 			if (unlikely(!vmf->pte ||
4344 				     !pte_same(ptep_get(vmf->pte),
4345 							vmf->orig_pte)))
4346 				goto unlock;
4347 
4348 			/*
4349 			 * Get a page reference while we know the page can't be
4350 			 * freed.
4351 			 */
4352 			get_page(vmf->page);
4353 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4354 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
4355 			put_page(vmf->page);
4356 		} else if (is_hwpoison_entry(entry)) {
4357 			ret = VM_FAULT_HWPOISON;
4358 		} else if (is_pte_marker_entry(entry)) {
4359 			ret = handle_pte_marker(vmf);
4360 		} else {
4361 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4362 			ret = VM_FAULT_SIGBUS;
4363 		}
4364 		goto out;
4365 	}
4366 
4367 	/* Prevent swapoff from happening to us. */
4368 	si = get_swap_device(entry);
4369 	if (unlikely(!si))
4370 		goto out;
4371 
4372 	folio = swap_cache_get_folio(entry, vma, vmf->address);
4373 	if (folio)
4374 		page = folio_file_page(folio, swp_offset(entry));
4375 	swapcache = folio;
4376 
4377 	if (!folio) {
4378 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
4379 		    __swap_count(entry) == 1) {
4380 			/* skip swapcache */
4381 			folio = alloc_swap_folio(vmf);
4382 			if (folio) {
4383 				__folio_set_locked(folio);
4384 				__folio_set_swapbacked(folio);
4385 
4386 				nr_pages = folio_nr_pages(folio);
4387 				if (folio_test_large(folio))
4388 					entry.val = ALIGN_DOWN(entry.val, nr_pages);
4389 				/*
4390 				 * Prevent parallel swapins from proceeding by
4391 				 * setting the cache flag. Otherwise another
4392 				 * thread may finish swapin first, free the
4393 				 * entry, and swap out again reusing the same
4394 				 * entry. That is undetectable, as pte_same()
4395 				 * returns true due to entry reuse.
4396 				 */
4397 				if (swapcache_prepare(entry, nr_pages)) {
4398 					/*
4399 					 * Relax a bit to prevent rapid
4400 					 * repeated page faults.
4401 					 */
4402 					add_wait_queue(&swapcache_wq, &wait);
4403 					schedule_timeout_uninterruptible(1);
4404 					remove_wait_queue(&swapcache_wq, &wait);
4405 					goto out_page;
4406 				}
4407 				need_clear_cache = true;
4408 
4409 				mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
4410 
4411 				shadow = get_shadow_from_swap_cache(entry);
4412 				if (shadow)
4413 					workingset_refault(folio, shadow);
4414 
4415 				folio_add_lru(folio);
4416 
4417 				/* To provide entry to swap_read_folio() */
4418 				folio->swap = entry;
4419 				swap_read_folio(folio, NULL);
4420 				folio->private = NULL;
4421 			}
4422 		} else {
4423 			folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
4424 						vmf);
4425 			swapcache = folio;
4426 		}
4427 
4428 		if (!folio) {
4429 			/*
4430 			 * Back out if somebody else faulted in this pte
4431 			 * while we released the pte lock.
4432 			 */
4433 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4434 					vmf->address, &vmf->ptl);
4435 			if (likely(vmf->pte &&
4436 				   pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4437 				ret = VM_FAULT_OOM;
4438 			goto unlock;
4439 		}
4440 
4441 		/* Had to read the page from swap area: Major fault */
4442 		ret = VM_FAULT_MAJOR;
4443 		count_vm_event(PGMAJFAULT);
4444 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4445 		page = folio_file_page(folio, swp_offset(entry));
4446 	} else if (PageHWPoison(page)) {
4447 		/*
4448 		 * hwpoisoned dirty swapcache pages are kept for killing
4449 		 * owner processes (which may be unknown at hwpoison time)
4450 		 */
4451 		ret = VM_FAULT_HWPOISON;
4452 		goto out_release;
4453 	}
4454 
4455 	ret |= folio_lock_or_retry(folio, vmf);
4456 	if (ret & VM_FAULT_RETRY)
4457 		goto out_release;
4458 
4459 	if (swapcache) {
4460 		/*
4461 		 * Make sure folio_free_swap() or swapoff did not release the
4462 		 * swapcache from under us.  The page pin, and pte_same test
4463 		 * below, are not enough to exclude that.  Even if it is still
4464 		 * swapcache, we need to check that the page's swap has not
4465 		 * changed.
4466 		 */
4467 		if (unlikely(!folio_test_swapcache(folio) ||
4468 			     page_swap_entry(page).val != entry.val))
4469 			goto out_page;
4470 
4471 		/*
4472 		 * KSM sometimes has to copy on read faults, for example, if
4473 		 * page->index of !PageKSM() pages would be nonlinear inside the
4474 		 * anon VMA -- PageKSM() is lost on actual swapout.
4475 		 */
4476 		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
4477 		if (unlikely(!folio)) {
4478 			ret = VM_FAULT_OOM;
4479 			folio = swapcache;
4480 			goto out_page;
4481 		} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
4482 			ret = VM_FAULT_HWPOISON;
4483 			folio = swapcache;
4484 			goto out_page;
4485 		}
4486 		if (folio != swapcache)
4487 			page = folio_page(folio, 0);
4488 
4489 		/*
4490 		 * If we want to map a page that's in the swapcache writable, we
4491 		 * have to detect via the refcount if we're really the exclusive
4492 		 * owner. Try removing the extra reference from the local LRU
4493 		 * caches if required.
4494 		 */
4495 		if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
4496 		    !folio_test_ksm(folio) && !folio_test_lru(folio))
4497 			lru_add_drain();
4498 	}
4499 
4500 	folio_throttle_swaprate(folio, GFP_KERNEL);
4501 
4502 	/*
4503 	 * Back out if somebody else already faulted in this pte.
4504 	 */
4505 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4506 			&vmf->ptl);
4507 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4508 		goto out_nomap;
4509 
4510 	if (unlikely(!folio_test_uptodate(folio))) {
4511 		ret = VM_FAULT_SIGBUS;
4512 		goto out_nomap;
4513 	}
4514 
4515 	/* allocated large folios for SWP_SYNCHRONOUS_IO */
4516 	if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
4517 		unsigned long nr = folio_nr_pages(folio);
4518 		unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
4519 		unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
4520 		pte_t *folio_ptep = vmf->pte - idx;
4521 		pte_t folio_pte = ptep_get(folio_ptep);
4522 
4523 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4524 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4525 			goto out_nomap;
4526 
4527 		page_idx = idx;
4528 		address = folio_start;
4529 		ptep = folio_ptep;
4530 		goto check_folio;
4531 	}
4532 
4533 	nr_pages = 1;
4534 	page_idx = 0;
4535 	address = vmf->address;
4536 	ptep = vmf->pte;
4537 	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
4538 		int nr = folio_nr_pages(folio);
4539 		unsigned long idx = folio_page_idx(folio, page);
4540 		unsigned long folio_start = address - idx * PAGE_SIZE;
4541 		unsigned long folio_end = folio_start + nr * PAGE_SIZE;
4542 		pte_t *folio_ptep;
4543 		pte_t folio_pte;
4544 
4545 		if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
4546 			goto check_folio;
4547 		if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
4548 			goto check_folio;
4549 
4550 		folio_ptep = vmf->pte - idx;
4551 		folio_pte = ptep_get(folio_ptep);
4552 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4553 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4554 			goto check_folio;
4555 
4556 		page_idx = idx;
4557 		address = folio_start;
4558 		ptep = folio_ptep;
4559 		nr_pages = nr;
4560 		entry = folio->swap;
4561 		page = &folio->page;
4562 	}
4563 
4564 check_folio:
4565 	/*
4566 	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4567 	 * must never point at an anonymous page in the swapcache that is
4568 	 * PG_anon_exclusive. Sanity check that this holds and especially, that
4569 	 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4570 	 * check after taking the PT lock and making sure that nobody
4571 	 * concurrently faulted in this page and set PG_anon_exclusive.
4572 	 */
4573 	BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
4574 	BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
4575 
4576 	/*
4577 	 * Check under PT lock (to protect against a concurrent fork() sharing
4578 	 * the swap entry) for certainly exclusive pages.
4579 	 */
4580 	if (!folio_test_ksm(folio)) {
4581 		exclusive = pte_swp_exclusive(vmf->orig_pte);
4582 		if (folio != swapcache) {
4583 			/*
4584 			 * We have a fresh page that is not exposed to the
4585 			 * swapcache -> certainly exclusive.
4586 			 */
4587 			exclusive = true;
4588 		} else if (exclusive && folio_test_writeback(folio) &&
4589 			  data_race(si->flags & SWP_STABLE_WRITES)) {
4590 			/*
4591 			 * This is tricky: not all swap backends support
4592 			 * concurrent page modifications while under writeback.
4593 			 *
4594 			 * So if we stumble over such a page in the swapcache
4595 			 * we must not set the page exclusive, otherwise we can
4596 			 * map it writable without further checks and modify it
4597 			 * while still under writeback.
4598 			 *
4599 			 * For these problematic swap backends, simply drop the
4600 			 * exclusive marker: this is perfectly fine as we start
4601 			 * writeback only if we fully unmapped the page and
4602 			 * there are no unexpected references on the page after
4603 			 * unmapping succeeded. After fully unmapped, no
4604 			 * further GUP references (FOLL_GET and FOLL_PIN) can
4605 			 * appear, so dropping the exclusive marker and mapping
4606 			 * it only R/O is fine.
4607 			 */
4608 			exclusive = false;
4609 		}
4610 	}
4611 
4612 	/*
4613 	 * Some architectures may have to restore extra metadata to the page
4614 	 * when reading from swap. This metadata may be indexed by swap entry
4615 	 * so this must be called before swap_free().
4616 	 */
4617 	arch_swap_restore(folio_swap(entry, folio), folio);
4618 
4619 	/*
4620 	 * Remove the swap entry and conditionally try to free up the swapcache.
4621 	 * We're already holding a reference on the page but haven't mapped it
4622 	 * yet.
4623 	 */
4624 	swap_free_nr(entry, nr_pages);
4625 	if (should_try_to_free_swap(folio, vma, vmf->flags))
4626 		folio_free_swap(folio);
4627 
4628 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4629 	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
4630 	pte = mk_pte(page, vma->vm_page_prot);
4631 	if (pte_swp_soft_dirty(vmf->orig_pte))
4632 		pte = pte_mksoft_dirty(pte);
4633 	if (pte_swp_uffd_wp(vmf->orig_pte))
4634 		pte = pte_mkuffd_wp(pte);
4635 
4636 	/*
4637 	 * Same logic as in do_wp_page(); however, optimize for pages that are
4638 	 * certainly not shared either because we just allocated them without
4639 	 * exposing them to the swapcache or because the swap entry indicates
4640 	 * exclusivity.
4641 	 */
4642 	if (!folio_test_ksm(folio) &&
4643 	    (exclusive || folio_ref_count(folio) == 1)) {
4644 		if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
4645 		    !pte_needs_soft_dirty_wp(vma, pte)) {
4646 			pte = pte_mkwrite(pte, vma);
4647 			if (vmf->flags & FAULT_FLAG_WRITE) {
4648 				pte = pte_mkdirty(pte);
4649 				vmf->flags &= ~FAULT_FLAG_WRITE;
4650 			}
4651 		}
4652 		rmap_flags |= RMAP_EXCLUSIVE;
4653 	}
4654 	folio_ref_add(folio, nr_pages - 1);
4655 	flush_icache_pages(vma, page, nr_pages);
4656 	vmf->orig_pte = pte_advance_pfn(pte, page_idx);
4657 
4658 	/* ksm created a completely new copy */
4659 	if (unlikely(folio != swapcache && swapcache)) {
4660 		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
4661 		folio_add_lru_vma(folio, vma);
4662 	} else if (!folio_test_anon(folio)) {
4663 		/*
4664 		 * We currently only expect small !anon folios which are either
4665 		 * fully exclusive or fully shared, or newly allocated large
4666 		 * folios which are fully exclusive. If we ever get large
4667 		 * folios within swapcache here, we have to be careful.
4668 		 */
4669 		VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio));
4670 		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
4671 		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
4672 	} else {
4673 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
4674 					rmap_flags);
4675 	}
4676 
4677 	VM_BUG_ON(!folio_test_anon(folio) ||
4678 			(pte_write(pte) && !PageAnonExclusive(page)));
4679 	set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
4680 	arch_do_swap_page_nr(vma->vm_mm, vma, address,
4681 			pte, pte, nr_pages);
4682 
4683 	folio_unlock(folio);
4684 	if (folio != swapcache && swapcache) {
4685 		/*
4686 		 * Hold the lock to avoid the swap entry being reused
4687 		 * until we take the PT lock for the pte_same() check
4688 		 * (to avoid false positives from pte_same). For
4689 		 * further safety release the lock after the swap_free
4690 		 * so that the swap count won't change under a
4691 		 * parallel locked swapcache.
4692 		 */
4693 		folio_unlock(swapcache);
4694 		folio_put(swapcache);
4695 	}
4696 
4697 	if (vmf->flags & FAULT_FLAG_WRITE) {
4698 		ret |= do_wp_page(vmf);
4699 		if (ret & VM_FAULT_ERROR)
4700 			ret &= VM_FAULT_ERROR;
4701 		goto out;
4702 	}
4703 
4704 	/* No need to invalidate - it was non-present before */
4705 	update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
4706 unlock:
4707 	if (vmf->pte)
4708 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4709 out:
4710 	/* Clear the swap cache pin for direct swapin after PTL unlock */
4711 	if (need_clear_cache) {
4712 		swapcache_clear(si, entry, nr_pages);
4713 		if (waitqueue_active(&swapcache_wq))
4714 			wake_up(&swapcache_wq);
4715 	}
4716 	if (si)
4717 		put_swap_device(si);
4718 	return ret;
4719 out_nomap:
4720 	if (vmf->pte)
4721 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4722 out_page:
4723 	folio_unlock(folio);
4724 out_release:
4725 	folio_put(folio);
4726 	if (folio != swapcache && swapcache) {
4727 		folio_unlock(swapcache);
4728 		folio_put(swapcache);
4729 	}
4730 	if (need_clear_cache) {
4731 		swapcache_clear(si, entry, nr_pages);
4732 		if (waitqueue_active(&swapcache_wq))
4733 			wake_up(&swapcache_wq);
4734 	}
4735 	if (si)
4736 		put_swap_device(si);
4737 	return ret;
4738 }
4739 
4740 static bool pte_range_none(pte_t *pte, int nr_pages)
4741 {
4742 	int i;
4743 
4744 	for (i = 0; i < nr_pages; i++) {
4745 		if (!pte_none(ptep_get_lockless(pte + i)))
4746 			return false;
4747 	}
4748 
4749 	return true;
4750 }
4751 
4752 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
4753 {
4754 	struct vm_area_struct *vma = vmf->vma;
4755 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4756 	unsigned long orders;
4757 	struct folio *folio;
4758 	unsigned long addr;
4759 	pte_t *pte;
4760 	gfp_t gfp;
4761 	int order;
4762 
4763 	/*
4764 	 * If uffd is active for the vma we need per-page fault fidelity to
4765 	 * maintain the uffd semantics.
4766 	 */
4767 	if (unlikely(userfaultfd_armed(vma)))
4768 		goto fallback;
4769 
4770 	/*
4771 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4772 	 * for this vma. Then filter out the orders that can't be allocated over
4773 	 * the faulting address and still be fully contained in the vma.
4774 	 */
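	/*
	 * For example, with 4 KiB base pages and PMD_ORDER == 9 (e.g. x86-64),
	 * the candidate mask covers large orders up to 8, since
	 * BIT(PMD_ORDER) - 1 excludes the PMD order itself; sysfs settings and
	 * the VMA bounds then narrow that set further.
	 */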
4775 	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4776 			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4777 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4778 
4779 	if (!orders)
4780 		goto fallback;
4781 
4782 	pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
4783 	if (!pte)
4784 		return ERR_PTR(-EAGAIN);
4785 
4786 	/*
4787 	 * Find the highest order where the aligned range is completely
4788 	 * pte_none(). Note that all remaining orders will be completely
4789 	 * pte_none().
4790 	 */
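	/*
	 * E.g. with 4 KiB pages and order 4, the range is aligned down to a
	 * 64 KiB boundary and the 16 PTEs covering it are checked; if any of
	 * them is populated, the next lower enabled order is tried instead.
	 */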
4791 	order = highest_order(orders);
4792 	while (orders) {
4793 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4794 		if (pte_range_none(pte + pte_index(addr), 1 << order))
4795 			break;
4796 		order = next_order(&orders, order);
4797 	}
4798 
4799 	pte_unmap(pte);
4800 
4801 	if (!orders)
4802 		goto fallback;
4803 
4804 	/* Try allocating the highest of the remaining orders. */
4805 	gfp = vma_thp_gfp_mask(vma);
4806 	while (orders) {
4807 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4808 		folio = vma_alloc_folio(gfp, order, vma, addr);
4809 		if (folio) {
4810 			if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
4811 				count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
4812 				folio_put(folio);
4813 				goto next;
4814 			}
4815 			folio_throttle_swaprate(folio, gfp);
4816 			/*
4817 			 * When a folio is not zeroed during allocation
4818 			 * (__GFP_ZERO not used) or user folios require special
4819 			 * handling, folio_zero_user() is used to make sure
4820 			 * that the page corresponding to the faulting address
4821 			 * will be hot in the cache after zeroing.
4822 			 */
4823 			if (user_alloc_needs_zeroing())
4824 				folio_zero_user(folio, vmf->address);
4825 			return folio;
4826 		}
4827 next:
4828 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
4829 		order = next_order(&orders, order);
4830 	}
4831 
4832 fallback:
4833 #endif
4834 	return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
4835 }
4836 
4837 /*
4838  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4839  * but allow concurrent faults), and pte mapped but not yet locked.
4840  * We return with mmap_lock still held, but pte unmapped and unlocked.
4841  */
4842 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4843 {
4844 	struct vm_area_struct *vma = vmf->vma;
4845 	unsigned long addr = vmf->address;
4846 	struct folio *folio;
4847 	vm_fault_t ret = 0;
4848 	int nr_pages = 1;
4849 	pte_t entry;
4850 
4851 	/* File mapping without ->vm_ops ? */
4852 	if (vma->vm_flags & VM_SHARED)
4853 		return VM_FAULT_SIGBUS;
4854 
4855 	/*
4856 	 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4857 	 * be distinguished from a transient failure of pte_offset_map().
4858 	 */
4859 	if (pte_alloc(vma->vm_mm, vmf->pmd))
4860 		return VM_FAULT_OOM;
4861 
4862 	/* Use the zero-page for reads */
4863 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4864 			!mm_forbids_zeropage(vma->vm_mm)) {
4865 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4866 						vma->vm_page_prot));
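		/*
		 * A read fault thus maps the shared zero page read-only; the
		 * first write will COW it into a private page via the
		 * write-protect fault path.
		 */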
4867 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4868 				vmf->address, &vmf->ptl);
4869 		if (!vmf->pte)
4870 			goto unlock;
4871 		if (vmf_pte_changed(vmf)) {
4872 			update_mmu_tlb(vma, vmf->address, vmf->pte);
4873 			goto unlock;
4874 		}
4875 		ret = check_stable_address_space(vma->vm_mm);
4876 		if (ret)
4877 			goto unlock;
4878 		/* Deliver the page fault to userland, check inside PT lock */
4879 		if (userfaultfd_missing(vma)) {
4880 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4881 			return handle_userfault(vmf, VM_UFFD_MISSING);
4882 		}
4883 		goto setpte;
4884 	}
4885 
4886 	/* Allocate our own private page. */
4887 	ret = vmf_anon_prepare(vmf);
4888 	if (ret)
4889 		return ret;
4890 	/* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
4891 	folio = alloc_anon_folio(vmf);
4892 	if (IS_ERR(folio))
4893 		return 0;
4894 	if (!folio)
4895 		goto oom;
4896 
4897 	nr_pages = folio_nr_pages(folio);
4898 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
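	/*
	 * E.g. for an order-4 (16 page) folio with 4 KiB pages, addr is the
	 * fault address rounded down to a 64 KiB boundary, so that all 16
	 * PTEs fall within the same naturally aligned block.
	 */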
4899 
4900 	/*
4901 	 * The memory barrier inside __folio_mark_uptodate makes sure that
4902 	 * preceding stores to the page contents become visible before
4903 	 * the set_pte_at() write.
4904 	 */
4905 	__folio_mark_uptodate(folio);
4906 
4907 	entry = mk_pte(&folio->page, vma->vm_page_prot);
4908 	entry = pte_sw_mkyoung(entry);
4909 	if (vma->vm_flags & VM_WRITE)
4910 		entry = pte_mkwrite(pte_mkdirty(entry), vma);
4911 
4912 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
4913 	if (!vmf->pte)
4914 		goto release;
4915 	if (nr_pages == 1 && vmf_pte_changed(vmf)) {
4916 		update_mmu_tlb(vma, addr, vmf->pte);
4917 		goto release;
4918 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
4919 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
4920 		goto release;
4921 	}
4922 
4923 	ret = check_stable_address_space(vma->vm_mm);
4924 	if (ret)
4925 		goto release;
4926 
4927 	/* Deliver the page fault to userland, check inside PT lock */
4928 	if (userfaultfd_missing(vma)) {
4929 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4930 		folio_put(folio);
4931 		return handle_userfault(vmf, VM_UFFD_MISSING);
4932 	}
4933 
4934 	folio_ref_add(folio, nr_pages - 1);
4935 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4936 	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
4937 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
4938 	folio_add_lru_vma(folio, vma);
4939 setpte:
4940 	if (vmf_orig_pte_uffd_wp(vmf))
4941 		entry = pte_mkuffd_wp(entry);
4942 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
4943 
4944 	/* No need to invalidate - it was non-present before */
4945 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
4946 unlock:
4947 	if (vmf->pte)
4948 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4949 	return ret;
4950 release:
4951 	folio_put(folio);
4952 	goto unlock;
4953 oom:
4954 	return VM_FAULT_OOM;
4955 }
4956 
4957 /*
4958  * The mmap_lock must have been held on entry, and may have been
4959  * released depending on flags and vma->vm_ops->fault() return value.
4960  * See filemap_fault() and __lock_page_retry().
4961  */
4962 static vm_fault_t __do_fault(struct vm_fault *vmf)
4963 {
4964 	struct vm_area_struct *vma = vmf->vma;
4965 	struct folio *folio;
4966 	vm_fault_t ret;
4967 
4968 	/*
4969 	 * Preallocate pte before we take page_lock because this might lead to
4970 	 * deadlocks for memcg reclaim which waits for pages under writeback:
4971 	 *				lock_page(A)
4972 	 *				SetPageWriteback(A)
4973 	 *				unlock_page(A)
4974 	 * lock_page(B)
4975 	 *				lock_page(B)
4976 	 * pte_alloc_one
4977 	 *   shrink_folio_list
4978 	 *     wait_on_page_writeback(A)
4979 	 *				SetPageWriteback(B)
4980 	 *				unlock_page(B)
4981 	 *				# flush A, B to clear the writeback
4982 	 */
4983 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4984 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4985 		if (!vmf->prealloc_pte)
4986 			return VM_FAULT_OOM;
4987 	}
4988 
4989 	ret = vma->vm_ops->fault(vmf);
4990 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4991 			    VM_FAULT_DONE_COW)))
4992 		return ret;
4993 
4994 	folio = page_folio(vmf->page);
4995 	if (unlikely(PageHWPoison(vmf->page))) {
4996 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4997 		if (ret & VM_FAULT_LOCKED) {
4998 			if (page_mapped(vmf->page))
4999 				unmap_mapping_folio(folio);
5000 			/* Retry if a clean folio was removed from the cache. */
5001 			if (mapping_evict_folio(folio->mapping, folio))
5002 				poisonret = VM_FAULT_NOPAGE;
5003 			folio_unlock(folio);
5004 		}
5005 		folio_put(folio);
5006 		vmf->page = NULL;
5007 		return poisonret;
5008 	}
5009 
5010 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
5011 		folio_lock(folio);
5012 	else
5013 		VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
5014 
5015 	return ret;
5016 }
5017 
5018 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5019 static void deposit_prealloc_pte(struct vm_fault *vmf)
5020 {
5021 	struct vm_area_struct *vma = vmf->vma;
5022 
5023 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
5024 	/*
5025 	 * We are going to consume the prealloc table,
5026 	 * count that as nr_ptes.
5027 	 */
5028 	mm_inc_nr_ptes(vma->vm_mm);
5029 	vmf->prealloc_pte = NULL;
5030 }
5031 
5032 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5033 {
5034 	struct folio *folio = page_folio(page);
5035 	struct vm_area_struct *vma = vmf->vma;
5036 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5037 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5038 	pmd_t entry;
5039 	vm_fault_t ret = VM_FAULT_FALLBACK;
5040 
5041 	/*
5042 	 * It is too late to allocate a small folio: we already have a large
5043 	 * folio in the pagecache. In particular, s390 KVM cannot tolerate any
5044 	 * PMD mappings, but PTE-mapped THPs are fine. So let's simply refuse
5045 	 * any PMD mappings if THPs are disabled.
5046 	 */
5047 	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags))
5048 		return ret;
5049 
5050 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
5051 		return ret;
5052 
5053 	if (folio_order(folio) != HPAGE_PMD_ORDER)
5054 		return ret;
5055 	page = &folio->page;
5056 
5057 	/*
5058 	 * Just back off if any subpage of a THP is corrupted; otherwise
5059 	 * the corrupted page may be mapped by the PMD silently and escape the
5060 	 * check.  This kind of THP can only be PTE mapped.  Access to
5061 	 * the corrupted subpage should trigger SIGBUS as expected.
5062 	 */
5063 	if (unlikely(folio_test_has_hwpoisoned(folio)))
5064 		return ret;
5065 
5066 	/*
5067 	 * Archs like ppc64 need additional space to store information
5068 	 * related to pte entry. Use the preallocated table for that.
5069 	 */
5070 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
5071 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5072 		if (!vmf->prealloc_pte)
5073 			return VM_FAULT_OOM;
5074 	}
5075 
5076 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
5077 	if (unlikely(!pmd_none(*vmf->pmd)))
5078 		goto out;
5079 
5080 	flush_icache_pages(vma, page, HPAGE_PMD_NR);
5081 
5082 	entry = mk_huge_pmd(page, vma->vm_page_prot);
5083 	if (write)
5084 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
5085 
5086 	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
5087 	folio_add_file_rmap_pmd(folio, page, vma);
5088 
5089 	/*
5090 	 * deposit and withdraw with pmd lock held
5091 	 */
5092 	if (arch_needs_pgtable_deposit())
5093 		deposit_prealloc_pte(vmf);
5094 
5095 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
5096 
5097 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
5098 
5099 	/* fault is handled */
5100 	ret = 0;
5101 	count_vm_event(THP_FILE_MAPPED);
5102 out:
5103 	spin_unlock(vmf->ptl);
5104 	return ret;
5105 }
5106 #else
5107 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5108 {
5109 	return VM_FAULT_FALLBACK;
5110 }
5111 #endif
5112 
5113 /**
5114  * set_pte_range - Set a range of PTEs to point to pages in a folio.
5115  * @vmf: Fault description.
5116  * @folio: The folio that contains @page.
5117  * @page: The first page to create a PTE for.
5118  * @nr: The number of PTEs to create.
5119  * @addr: The first address to create a PTE for.
5120  */
5121 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
5122 		struct page *page, unsigned int nr, unsigned long addr)
5123 {
5124 	struct vm_area_struct *vma = vmf->vma;
5125 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5126 	bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
5127 	pte_t entry;
5128 
5129 	flush_icache_pages(vma, page, nr);
5130 	entry = mk_pte(page, vma->vm_page_prot);
5131 
5132 	if (prefault && arch_wants_old_prefaulted_pte())
5133 		entry = pte_mkold(entry);
5134 	else
5135 		entry = pte_sw_mkyoung(entry);
5136 
5137 	if (write)
5138 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5139 	if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
5140 		entry = pte_mkuffd_wp(entry);
5141 	/* copy-on-write page */
5142 	if (write && !(vma->vm_flags & VM_SHARED)) {
5143 		VM_BUG_ON_FOLIO(nr != 1, folio);
5144 		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5145 		folio_add_lru_vma(folio, vma);
5146 	} else {
5147 		folio_add_file_rmap_ptes(folio, page, nr, vma);
5148 	}
5149 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
5150 
5151 	/* no need to invalidate: a not-present page won't be cached */
5152 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
5153 }
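
/*
 * Typical usage (a sketch of the call sequence in finish_fault() below, not a
 * complete caller): with the PTE lock held and the PTEs verified to still be
 * pte_none(), the caller does roughly:
 *
 *	folio_ref_add(folio, nr_pages - 1);
 *	set_pte_range(vmf, folio, page, nr_pages, addr);
 *	add_mm_counter(vma->vm_mm, type, nr_pages);
 */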
5154 
5155 static bool vmf_pte_changed(struct vm_fault *vmf)
5156 {
5157 	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
5158 		return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
5159 
5160 	return !pte_none(ptep_get(vmf->pte));
5161 }
5162 
5163 /**
5164  * finish_fault - finish page fault once we have prepared the page to fault
5165  *
5166  * @vmf: structure describing the fault
5167  *
5168  * This function handles all that is needed to finish a page fault once the
5169  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5170  * given page, adds reverse page mapping, handles memcg charges and LRU
5171  * addition.
5172  *
5173  * The function expects the page to be locked and on success it consumes a
5174  * reference to the page being mapped (for the PTE which maps it).
5175  *
5176  * Return: %0 on success, %VM_FAULT_ code in case of error.
5177  */
5178 vm_fault_t finish_fault(struct vm_fault *vmf)
5179 {
5180 	struct vm_area_struct *vma = vmf->vma;
5181 	struct page *page;
5182 	struct folio *folio;
5183 	vm_fault_t ret;
5184 	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
5185 		      !(vma->vm_flags & VM_SHARED);
5186 	int type, nr_pages;
5187 	unsigned long addr;
5188 	bool needs_fallback = false;
5189 
5190 fallback:
5191 	addr = vmf->address;
5192 
5193 	/* Did we COW the page? */
5194 	if (is_cow)
5195 		page = vmf->cow_page;
5196 	else
5197 		page = vmf->page;
5198 
5199 	/*
5200 	 * Check even for read faults because we might have lost our CoWed
5201 	 * page.
5202 	 */
5203 	if (!(vma->vm_flags & VM_SHARED)) {
5204 		ret = check_stable_address_space(vma->vm_mm);
5205 		if (ret)
5206 			return ret;
5207 	}
5208 
5209 	if (pmd_none(*vmf->pmd)) {
5210 		if (PageTransCompound(page)) {
5211 			ret = do_set_pmd(vmf, page);
5212 			if (ret != VM_FAULT_FALLBACK)
5213 				return ret;
5214 		}
5215 
5216 		if (vmf->prealloc_pte)
5217 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
5218 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
5219 			return VM_FAULT_OOM;
5220 	}
5221 
5222 	folio = page_folio(page);
5223 	nr_pages = folio_nr_pages(folio);
5224 
5225 	/*
5226 	 * Use per-page faults to maintain the uffd semantics; the same
5227 	 * approach also applies to non-anonymous-shmem faults to avoid
5228 	 * inflating the RSS of the process.
5229 	 */
5230 	if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) ||
5231 	    unlikely(needs_fallback)) {
5232 		nr_pages = 1;
5233 	} else if (nr_pages > 1) {
5234 		pgoff_t idx = folio_page_idx(folio, page);
5235 		/* The page offset of vmf->address within the VMA. */
5236 		pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5237 		/* The index of the entry in the pagetable for fault page. */
5238 		pgoff_t pte_off = pte_index(vmf->address);
5239 
5240 		/*
5241 		 * Fall back to a per-page fault if the folio in the page cache
5242 		 * extends beyond the VMA limits or the PMD page table limits.
5243 		 */
5244 		if (unlikely(vma_off < idx ||
5245 			    vma_off + (nr_pages - idx) > vma_pages(vma) ||
5246 			    pte_off < idx ||
5247 			    pte_off + (nr_pages - idx)  > PTRS_PER_PTE)) {
5248 			nr_pages = 1;
5249 		} else {
5250 			/* Now we can set mappings for the whole large folio. */
5251 			addr = vmf->address - idx * PAGE_SIZE;
5252 			page = &folio->page;
5253 		}
5254 	}
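	/*
	 * E.g. for a 16-page folio whose faulting page is index 3 within the
	 * folio: the checks above require at least 3 pages before
	 * vmf->address and 13 pages from it onwards, both within the VMA and
	 * within the current page table; only then is addr rewound to the
	 * folio start so that all 16 PTEs can be set in one go.
	 */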
5255 
5256 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5257 				       addr, &vmf->ptl);
5258 	if (!vmf->pte)
5259 		return VM_FAULT_NOPAGE;
5260 
5261 	/* Re-check under ptl */
5262 	if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
5263 		update_mmu_tlb(vma, addr, vmf->pte);
5264 		ret = VM_FAULT_NOPAGE;
5265 		goto unlock;
5266 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5267 		needs_fallback = true;
5268 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5269 		goto fallback;
5270 	}
5271 
5272 	folio_ref_add(folio, nr_pages - 1);
5273 	set_pte_range(vmf, folio, page, nr_pages, addr);
5274 	type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
5275 	add_mm_counter(vma->vm_mm, type, nr_pages);
5276 	ret = 0;
5277 
5278 unlock:
5279 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5280 	return ret;
5281 }
5282 
5283 static unsigned long fault_around_pages __read_mostly =
5284 	65536 >> PAGE_SHIFT;
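/* With 4 KiB pages the value above defaults to 16 pages (64 KiB). */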
5285 
5286 #ifdef CONFIG_DEBUG_FS
5287 static int fault_around_bytes_get(void *data, u64 *val)
5288 {
5289 	*val = fault_around_pages << PAGE_SHIFT;
5290 	return 0;
5291 }
5292 
5293 /*
5294  * fault_around_bytes must be rounded down to a power-of-two number of pages,
5295  * as that is what do_fault_around() expects to see.
5296  */
5297 static int fault_around_bytes_set(void *data, u64 val)
5298 {
5299 	if (val / PAGE_SIZE > PTRS_PER_PTE)
5300 		return -EINVAL;
5301 
5302 	/*
5303 	 * The minimum value is 1 page, however this results in no fault-around
5304 	 * at all. See should_fault_around().
5305 	 */
5306 	val = max(val, PAGE_SIZE);
5307 	fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
5308 
5309 	return 0;
5310 }
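/*
 * E.g. with 4 KiB pages, writing 60000 to fault_around_bytes rounds down to
 * 32768 bytes, i.e. fault_around_pages == 8; requests spanning more than a
 * single page table are rejected by the check above.
 */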
5311 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5312 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5313 
5314 static int __init fault_around_debugfs(void)
5315 {
5316 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
5317 				   &fault_around_bytes_fops);
5318 	return 0;
5319 }
5320 late_initcall(fault_around_debugfs);
5321 #endif
5322 
5323 /*
5324  * do_fault_around() tries to map a few pages around the fault address. The hope
5325  * is that the pages will be needed soon and this will lower the number of
5326  * faults to handle.
5327  *
5328  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5329  * not ready to be mapped: not up-to-date, locked, etc.
5330  *
5331  * This function doesn't cross VMA or page table boundaries, in order to call
5332  * map_pages() and acquire a PTE lock only once.
5333  *
5334  * fault_around_pages defines how many pages we'll try to map.
5335  * do_fault_around() expects it to be set to a power of two less than or equal
5336  * to PTRS_PER_PTE.
5337  *
5338  * The virtual address of the area that we map is naturally aligned to
5339  * fault_around_pages * PAGE_SIZE rounded down to the machine page size
5340  * (and therefore to page order).  This way it's easier to guarantee
5341  * that we don't cross page table boundaries.
5342  */
5343 static vm_fault_t do_fault_around(struct vm_fault *vmf)
5344 {
5345 	pgoff_t nr_pages = READ_ONCE(fault_around_pages);
5346 	pgoff_t pte_off = pte_index(vmf->address);
5347 	/* The page offset of vmf->address within the VMA. */
5348 	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5349 	pgoff_t from_pte, to_pte;
5350 	vm_fault_t ret;
5351 
5352 	/* The PTE offset of the start address, clamped to the VMA. */
5353 	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
5354 		       pte_off - min(pte_off, vma_off));
5355 
5356 	/* The PTE offset of the end address, clamped to the VMA and PTE. */
5357 	to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
5358 		      pte_off + vma_pages(vmf->vma) - vma_off) - 1;
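	/*
	 * E.g. with fault_around_pages == 16 and pte_off == 100, and a VMA
	 * that comfortably covers the range: from_pte == 96 and to_pte == 111,
	 * so map_pages() is asked for the 16 pages surrounding the fault.
	 */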
5359 
5360 	if (pmd_none(*vmf->pmd)) {
5361 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
5362 		if (!vmf->prealloc_pte)
5363 			return VM_FAULT_OOM;
5364 	}
5365 
5366 	rcu_read_lock();
5367 	ret = vmf->vma->vm_ops->map_pages(vmf,
5368 			vmf->pgoff + from_pte - pte_off,
5369 			vmf->pgoff + to_pte - pte_off);
5370 	rcu_read_unlock();
5371 
5372 	return ret;
5373 }
5374 
5375 /* Return true if we should do read fault-around, false otherwise */
5376 static inline bool should_fault_around(struct vm_fault *vmf)
5377 {
5378 	/* No ->map_pages?  No way to fault around... */
5379 	if (!vmf->vma->vm_ops->map_pages)
5380 		return false;
5381 
5382 	if (uffd_disable_fault_around(vmf->vma))
5383 		return false;
5384 
5385 	/* A single page implies no faulting 'around' at all. */
5386 	return fault_around_pages > 1;
5387 }
5388 
5389 static vm_fault_t do_read_fault(struct vm_fault *vmf)
5390 {
5391 	vm_fault_t ret = 0;
5392 	struct folio *folio;
5393 
5394 	/*
5395 	 * Let's call ->map_pages() first and use ->fault() as fallback
5396 	 * if page by the offset is not ready to be mapped (cold cache or
5397 	 * something).
5398 	 */
5399 	if (should_fault_around(vmf)) {
5400 		ret = do_fault_around(vmf);
5401 		if (ret)
5402 			return ret;
5403 	}
5404 
5405 	ret = vmf_can_call_fault(vmf);
5406 	if (ret)
5407 		return ret;
5408 
5409 	ret = __do_fault(vmf);
5410 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5411 		return ret;
5412 
5413 	ret |= finish_fault(vmf);
5414 	folio = page_folio(vmf->page);
5415 	folio_unlock(folio);
5416 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5417 		folio_put(folio);
5418 	return ret;
5419 }
5420 
5421 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
5422 {
5423 	struct vm_area_struct *vma = vmf->vma;
5424 	struct folio *folio;
5425 	vm_fault_t ret;
5426 
5427 	ret = vmf_can_call_fault(vmf);
5428 	if (!ret)
5429 		ret = vmf_anon_prepare(vmf);
5430 	if (ret)
5431 		return ret;
5432 
5433 	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
5434 	if (!folio)
5435 		return VM_FAULT_OOM;
5436 
5437 	vmf->cow_page = &folio->page;
5438 
5439 	ret = __do_fault(vmf);
5440 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5441 		goto uncharge_out;
5442 	if (ret & VM_FAULT_DONE_COW)
5443 		return ret;
5444 
5445 	if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
5446 		ret = VM_FAULT_HWPOISON;
5447 		goto unlock;
5448 	}
5449 	__folio_mark_uptodate(folio);
5450 
5451 	ret |= finish_fault(vmf);
5452 unlock:
5453 	unlock_page(vmf->page);
5454 	put_page(vmf->page);
5455 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5456 		goto uncharge_out;
5457 	return ret;
5458 uncharge_out:
5459 	folio_put(folio);
5460 	return ret;
5461 }
5462 
5463 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5464 {
5465 	struct vm_area_struct *vma = vmf->vma;
5466 	vm_fault_t ret, tmp;
5467 	struct folio *folio;
5468 
5469 	ret = vmf_can_call_fault(vmf);
5470 	if (ret)
5471 		return ret;
5472 
5473 	ret = __do_fault(vmf);
5474 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5475 		return ret;
5476 
5477 	folio = page_folio(vmf->page);
5478 
5479 	/*
5480 	 * Check if the backing address space wants to know that the page is
5481 	 * about to become writable
5482 	 */
5483 	if (vma->vm_ops->page_mkwrite) {
5484 		folio_unlock(folio);
5485 		tmp = do_page_mkwrite(vmf, folio);
5486 		if (unlikely(!tmp ||
5487 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5488 			folio_put(folio);
5489 			return tmp;
5490 		}
5491 	}
5492 
5493 	ret |= finish_fault(vmf);
5494 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5495 					VM_FAULT_RETRY))) {
5496 		folio_unlock(folio);
5497 		folio_put(folio);
5498 		return ret;
5499 	}
5500 
5501 	ret |= fault_dirty_shared_page(vmf);
5502 	return ret;
5503 }
5504 
5505 /*
5506  * We enter with non-exclusive mmap_lock (to exclude vma changes,
5507  * but allow concurrent faults).
5508  * The mmap_lock may have been released depending on flags and our
5509  * return value.  See filemap_fault() and __folio_lock_or_retry().
5510  * If mmap_lock is released, vma may become invalid (for example
5511  * by other thread calling munmap()).
5512  */
5513 static vm_fault_t do_fault(struct vm_fault *vmf)
5514 {
5515 	struct vm_area_struct *vma = vmf->vma;
5516 	struct mm_struct *vm_mm = vma->vm_mm;
5517 	vm_fault_t ret;
5518 
5519 	/*
5520 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
5521 	 */
5522 	if (!vma->vm_ops->fault) {
5523 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
5524 					       vmf->address, &vmf->ptl);
5525 		if (unlikely(!vmf->pte))
5526 			ret = VM_FAULT_SIGBUS;
5527 		else {
5528 			/*
5529 			 * Make sure this is not a temporary clearing of the
5530 			 * pte, by holding the ptl and checking again. An R/M/W
5531 			 * update of a pte involves taking the ptl and clearing
5532 			 * the pte (so that hardware cannot modify it
5533 			 * concurrently), followed by writing the updated entry.
5534 			 */
5535 			if (unlikely(pte_none(ptep_get(vmf->pte))))
5536 				ret = VM_FAULT_SIGBUS;
5537 			else
5538 				ret = VM_FAULT_NOPAGE;
5539 
5540 			pte_unmap_unlock(vmf->pte, vmf->ptl);
5541 		}
5542 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
5543 		ret = do_read_fault(vmf);
5544 	else if (!(vma->vm_flags & VM_SHARED))
5545 		ret = do_cow_fault(vmf);
5546 	else
5547 		ret = do_shared_fault(vmf);
5548 
5549 	/* preallocated pagetable is unused: free it */
5550 	if (vmf->prealloc_pte) {
5551 		pte_free(vm_mm, vmf->prealloc_pte);
5552 		vmf->prealloc_pte = NULL;
5553 	}
5554 	return ret;
5555 }
5556 
5557 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
5558 		      unsigned long addr, int *flags,
5559 		      bool writable, int *last_cpupid)
5560 {
5561 	struct vm_area_struct *vma = vmf->vma;
5562 
5563 	/*
5564 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5565 	 * much anyway since they can be in shared cache state. This misses
5566 	 * the case where a mapping is writable but the process never writes
5567 	 * to it but pte_write gets cleared during protection updates and
5568 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
5569 	 * background writeback, dirty balancing and application behaviour.
5570 	 */
5571 	if (!writable)
5572 		*flags |= TNF_NO_GROUP;
5573 
5574 	/*
5575 	 * Flag if the folio is shared between multiple address spaces. This
5576 	 * is later used when determining whether to group tasks together
5577 	 */
5578 	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
5579 		*flags |= TNF_SHARED;
5580 	/*
5581 	 * In memory tiering mode, the cpupid of a slow-memory page is used
5582 	 * to record the page access time, so use the default value here.
5583 	 */
5584 	if (folio_use_access_time(folio))
5585 		*last_cpupid = (-1 & LAST_CPUPID_MASK);
5586 	else
5587 		*last_cpupid = folio_last_cpupid(folio);
5588 
5589 	/* Record the current PID accessing the VMA */
5590 	vma_set_access_pid_bit(vma);
5591 
5592 	count_vm_numa_event(NUMA_HINT_FAULTS);
5593 #ifdef CONFIG_NUMA_BALANCING
5594 	count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
5595 #endif
5596 	if (folio_nid(folio) == numa_node_id()) {
5597 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5598 		*flags |= TNF_FAULT_LOCAL;
5599 	}
5600 
5601 	return mpol_misplaced(folio, vmf, addr);
5602 }
5603 
5604 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5605 					unsigned long fault_addr, pte_t *fault_pte,
5606 					bool writable)
5607 {
5608 	pte_t pte, old_pte;
5609 
5610 	old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
5611 	pte = pte_modify(old_pte, vma->vm_page_prot);
5612 	pte = pte_mkyoung(pte);
5613 	if (writable)
5614 		pte = pte_mkwrite(pte, vma);
5615 	ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
5616 	update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
5617 }
5618 
5619 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5620 				       struct folio *folio, pte_t fault_pte,
5621 				       bool ignore_writable, bool pte_write_upgrade)
5622 {
5623 	int nr = pte_pfn(fault_pte) - folio_pfn(folio);
5624 	unsigned long start, end, addr = vmf->address;
5625 	unsigned long addr_start = addr - (nr << PAGE_SHIFT);
5626 	unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
5627 	pte_t *start_ptep;
5628 
5629 	/* Stay within the VMA and within the page table. */
5630 	start = max3(addr_start, pt_start, vma->vm_start);
5631 	end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
5632 		   vma->vm_end);
5633 	start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
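	/*
	 * E.g. if the fault hit page index 3 of a 16-page folio (4 KiB pages),
	 * addr_start is three pages below the fault address; start and end
	 * clamp [addr_start, addr_start + 64 KiB) to the VMA and the current
	 * PMD's range, and start_ptep is rewound from vmf->pte by as many
	 * entries as addr was rewound in pages.
	 */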
5634 
5635 	/* Restore all PTEs' mapping of the large folio */
5636 	for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
5637 		pte_t ptent = ptep_get(start_ptep);
5638 		bool writable = false;
5639 
5640 		if (!pte_present(ptent) || !pte_protnone(ptent))
5641 			continue;
5642 
5643 		if (pfn_folio(pte_pfn(ptent)) != folio)
5644 			continue;
5645 
5646 		if (!ignore_writable) {
5647 			ptent = pte_modify(ptent, vma->vm_page_prot);
5648 			writable = pte_write(ptent);
5649 			if (!writable && pte_write_upgrade &&
5650 			    can_change_pte_writable(vma, addr, ptent))
5651 				writable = true;
5652 		}
5653 
5654 		numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
5655 	}
5656 }
5657 
5658 static vm_fault_t do_numa_page(struct vm_fault *vmf)
5659 {
5660 	struct vm_area_struct *vma = vmf->vma;
5661 	struct folio *folio = NULL;
5662 	int nid = NUMA_NO_NODE;
5663 	bool writable = false, ignore_writable = false;
5664 	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
5665 	int last_cpupid;
5666 	int target_nid;
5667 	pte_t pte, old_pte;
5668 	int flags = 0, nr_pages;
5669 
5670 	/*
5671 	 * The pte cannot be used safely until we verify, while holding the page
5672 	 * table lock, that its contents have not changed during fault handling.
5673 	 */
5674 	spin_lock(vmf->ptl);
5675 	/* Read the live PTE from the page tables: */
5676 	old_pte = ptep_get(vmf->pte);
5677 
5678 	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
5679 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5680 		return 0;
5681 	}
5682 
5683 	pte = pte_modify(old_pte, vma->vm_page_prot);
5684 
5685 	/*
5686 	 * Detect now whether the PTE could be writable; this information
5687 	 * is only valid while holding the PT lock.
5688 	 */
5689 	writable = pte_write(pte);
5690 	if (!writable && pte_write_upgrade &&
5691 	    can_change_pte_writable(vma, vmf->address, pte))
5692 		writable = true;
5693 
5694 	folio = vm_normal_folio(vma, vmf->address, pte);
5695 	if (!folio || folio_is_zone_device(folio))
5696 		goto out_map;
5697 
5698 	nid = folio_nid(folio);
5699 	nr_pages = folio_nr_pages(folio);
5700 
5701 	target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
5702 					writable, &last_cpupid);
5703 	if (target_nid == NUMA_NO_NODE)
5704 		goto out_map;
5705 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
5706 		flags |= TNF_MIGRATE_FAIL;
5707 		goto out_map;
5708 	}
5709 	/* The folio is isolated and isolation code holds a folio reference. */
5710 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5711 	writable = false;
5712 	ignore_writable = true;
5713 
5714 	/* Migrate to the requested node */
5715 	if (!migrate_misplaced_folio(folio, target_nid)) {
5716 		nid = target_nid;
5717 		flags |= TNF_MIGRATED;
5718 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
5719 		return 0;
5720 	}
5721 
5722 	flags |= TNF_MIGRATE_FAIL;
5723 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5724 				       vmf->address, &vmf->ptl);
5725 	if (unlikely(!vmf->pte))
5726 		return 0;
5727 	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
5728 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5729 		return 0;
5730 	}
5731 out_map:
5732 	/*
5733 	 * Make it present again. Depending on how the arch implements
5734 	 * non-accessible ptes, some may allow access from kernel mode.
5735 	 */
5736 	if (folio && folio_test_large(folio))
5737 		numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
5738 					   pte_write_upgrade);
5739 	else
5740 		numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
5741 					    writable);
5742 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5743 
5744 	if (nid != NUMA_NO_NODE)
5745 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
5746 	return 0;
5747 }
5748 
5749 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
5750 {
5751 	struct vm_area_struct *vma = vmf->vma;
5752 	if (vma_is_anonymous(vma))
5753 		return do_huge_pmd_anonymous_page(vmf);
5754 	if (vma->vm_ops->huge_fault)
5755 		return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5756 	return VM_FAULT_FALLBACK;
5757 }
5758 
5759 /* `inline' is required to avoid gcc 4.1.2 build error */
5760 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
5761 {
5762 	struct vm_area_struct *vma = vmf->vma;
5763 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5764 	vm_fault_t ret;
5765 
5766 	if (vma_is_anonymous(vma)) {
5767 		if (likely(!unshare) &&
5768 		    userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
5769 			if (userfaultfd_wp_async(vmf->vma))
5770 				goto split;
5771 			return handle_userfault(vmf, VM_UFFD_WP);
5772 		}
5773 		return do_huge_pmd_wp_page(vmf);
5774 	}
5775 
5776 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5777 		if (vma->vm_ops->huge_fault) {
5778 			ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5779 			if (!(ret & VM_FAULT_FALLBACK))
5780 				return ret;
5781 		}
5782 	}
5783 
5784 split:
5785 	/* COW or write-notify handled on pte level: split pmd. */
5786 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5787 
5788 	return VM_FAULT_FALLBACK;
5789 }
5790 
5791 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
5792 {
5793 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
5794 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5795 	struct vm_area_struct *vma = vmf->vma;
5796 	/* No support for anonymous transparent PUD pages yet */
5797 	if (vma_is_anonymous(vma))
5798 		return VM_FAULT_FALLBACK;
5799 	if (vma->vm_ops->huge_fault)
5800 		return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5801 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5802 	return VM_FAULT_FALLBACK;
5803 }
5804 
5805 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5806 {
5807 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
5808 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5809 	struct vm_area_struct *vma = vmf->vma;
5810 	vm_fault_t ret;
5811 
5812 	/* No support for anonymous transparent PUD pages yet */
5813 	if (vma_is_anonymous(vma))
5814 		goto split;
5815 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5816 		if (vma->vm_ops->huge_fault) {
5817 			ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5818 			if (!(ret & VM_FAULT_FALLBACK))
5819 				return ret;
5820 		}
5821 	}
5822 split:
5823 	/* COW or write-notify not handled on PUD level: split pud. */
5824 	__split_huge_pud(vma, vmf->pud, vmf->address);
5825 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5826 	return VM_FAULT_FALLBACK;
5827 }
5828 
5829 /*
5830  * These routines also need to handle stuff like marking pages dirty
5831  * and/or accessed for architectures that don't do it in hardware (most
5832  * RISC architectures).  The early dirtying is also good on the i386.
5833  *
5834  * There is also a hook called "update_mmu_cache()" that architectures
5835  * with external mmu caches can use to update those (ie the Sparc or
5836  * PowerPC hashed page tables that act as extended TLBs).
5837  *
5838  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
5839  * concurrent faults).
5840  *
5841  * The mmap_lock may have been released depending on flags and our return value.
5842  * See filemap_fault() and __folio_lock_or_retry().
5843  */
5844 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
5845 {
5846 	pte_t entry;
5847 
5848 	if (unlikely(pmd_none(*vmf->pmd))) {
5849 		/*
5850 		 * Leave __pte_alloc() until later: because vm_ops->fault may
5851 		 * want to allocate a huge page, and if we expose the page
5852 		 * table for an instant, it will be difficult to retract it
5853 		 * from concurrent faults and from rmap lookups.
5854 		 */
5855 		vmf->pte = NULL;
5856 		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
5857 	} else {
5858 		pmd_t dummy_pmdval;
5859 
5860 		/*
5861 		 * A regular pmd is established and it can't morph into a huge
5862 		 * pmd by anon khugepaged, since that takes mmap_lock in write
5863 		 * mode; but shmem or file collapse to THP could still morph
5864 		 * it into a huge pmd: just retry later if so.
5865 		 *
5866 		 * Use the maywrite version to indicate that vmf->pte may be
5867 		 * modified, but since we will use pte_same() to detect the
5868 		 * change of the !pte_none() entry, there is no need to recheck
5869 		 * the pmdval. Here we choose to pass a dummy variable instead
5870 		 * of NULL, which helps new users think about why this place is
5871 		 * special.
5872 		 */
5873 		vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
5874 						    vmf->address, &dummy_pmdval,
5875 						    &vmf->ptl);
5876 		if (unlikely(!vmf->pte))
5877 			return 0;
5878 		vmf->orig_pte = ptep_get_lockless(vmf->pte);
5879 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
5880 
5881 		if (pte_none(vmf->orig_pte)) {
5882 			pte_unmap(vmf->pte);
5883 			vmf->pte = NULL;
5884 		}
5885 	}
5886 
5887 	if (!vmf->pte)
5888 		return do_pte_missing(vmf);
5889 
5890 	if (!pte_present(vmf->orig_pte))
5891 		return do_swap_page(vmf);
5892 
5893 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5894 		return do_numa_page(vmf);
5895 
5896 	spin_lock(vmf->ptl);
5897 	entry = vmf->orig_pte;
5898 	if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
5899 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
5900 		goto unlock;
5901 	}
5902 	if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5903 		if (!pte_write(entry))
5904 			return do_wp_page(vmf);
5905 		else if (likely(vmf->flags & FAULT_FLAG_WRITE))
5906 			entry = pte_mkdirty(entry);
5907 	}
5908 	entry = pte_mkyoung(entry);
5909 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5910 				vmf->flags & FAULT_FLAG_WRITE)) {
5911 		update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5912 				vmf->pte, 1);
5913 	} else {
5914 		/* Skip spurious TLB flush for retried page fault */
5915 		if (vmf->flags & FAULT_FLAG_TRIED)
5916 			goto unlock;
5917 		/*
5918 		 * This is needed only for protection faults but the arch code
5919 		 * is not yet telling us if this is a protection fault or not.
5920 		 * This still avoids useless tlb flushes for .text page faults
5921 		 * with threads.
5922 		 */
5923 		if (vmf->flags & FAULT_FLAG_WRITE)
5924 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5925 						     vmf->pte);
5926 	}
5927 unlock:
5928 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5929 	return 0;
5930 }
5931 
5932 /*
5933  * On entry, we hold either the VMA lock or the mmap_lock
5934  * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in
5935  * the result, the mmap_lock is not held on exit.  See filemap_fault()
5936  * and __folio_lock_or_retry().
5937  */
5938 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5939 		unsigned long address, unsigned int flags)
5940 {
5941 	struct vm_fault vmf = {
5942 		.vma = vma,
5943 		.address = address & PAGE_MASK,
5944 		.real_address = address,
5945 		.flags = flags,
5946 		.pgoff = linear_page_index(vma, address),
5947 		.gfp_mask = __get_fault_gfp_mask(vma),
5948 	};
5949 	struct mm_struct *mm = vma->vm_mm;
5950 	unsigned long vm_flags = vma->vm_flags;
5951 	pgd_t *pgd;
5952 	p4d_t *p4d;
5953 	vm_fault_t ret;
5954 
5955 	pgd = pgd_offset(mm, address);
5956 	p4d = p4d_alloc(mm, pgd, address);
5957 	if (!p4d)
5958 		return VM_FAULT_OOM;
5959 
5960 	vmf.pud = pud_alloc(mm, p4d, address);
5961 	if (!vmf.pud)
5962 		return VM_FAULT_OOM;
5963 retry_pud:
5964 	if (pud_none(*vmf.pud) &&
5965 	    thp_vma_allowable_order(vma, vm_flags,
5966 				TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
5967 		ret = create_huge_pud(&vmf);
5968 		if (!(ret & VM_FAULT_FALLBACK))
5969 			return ret;
5970 	} else {
5971 		pud_t orig_pud = *vmf.pud;
5972 
5973 		barrier();
5974 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5975 
5976 			/*
5977 			 * TODO once we support anonymous PUDs: NUMA case and
5978 			 * FAULT_FLAG_UNSHARE handling.
5979 			 */
5980 			if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
5981 				ret = wp_huge_pud(&vmf, orig_pud);
5982 				if (!(ret & VM_FAULT_FALLBACK))
5983 					return ret;
5984 			} else {
5985 				huge_pud_set_accessed(&vmf, orig_pud);
5986 				return 0;
5987 			}
5988 		}
5989 	}
5990 
5991 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5992 	if (!vmf.pmd)
5993 		return VM_FAULT_OOM;
5994 
5995 	/* Huge pud page fault raced with pmd_alloc? */
5996 	if (pud_trans_unstable(vmf.pud))
5997 		goto retry_pud;
5998 
5999 	if (pmd_none(*vmf.pmd) &&
6000 	    thp_vma_allowable_order(vma, vm_flags,
6001 				TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
6002 		ret = create_huge_pmd(&vmf);
6003 		if (!(ret & VM_FAULT_FALLBACK))
6004 			return ret;
6005 	} else {
6006 		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
6007 
6008 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
6009 			VM_BUG_ON(thp_migration_supported() &&
6010 					  !is_pmd_migration_entry(vmf.orig_pmd));
6011 			if (is_pmd_migration_entry(vmf.orig_pmd))
6012 				pmd_migration_entry_wait(mm, vmf.pmd);
6013 			return 0;
6014 		}
6015 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
6016 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
6017 				return do_huge_pmd_numa_page(&vmf);
6018 
6019 			if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6020 			    !pmd_write(vmf.orig_pmd)) {
6021 				ret = wp_huge_pmd(&vmf);
6022 				if (!(ret & VM_FAULT_FALLBACK))
6023 					return ret;
6024 			} else {
6025 				huge_pmd_set_accessed(&vmf);
6026 				return 0;
6027 			}
6028 		}
6029 	}
6030 
6031 	return handle_pte_fault(&vmf);
6032 }
6033 
6034 /**
6035  * mm_account_fault - Do page fault accounting
6036  * @mm: mm from which memcg should be extracted. It can be NULL.
6037  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
6038  *        of perf event counters, but we'll still do the per-task accounting to
6039  *        the task who triggered this page fault.
6040  * @address: the faulted address.
6041  * @flags: the fault flags.
6042  * @ret: the fault retcode.
6043  *
6044  * This will take care of most of the page fault accounting.  Meanwhile, it
6045  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
6046  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
6047  * still be in per-arch page fault handlers at the entry of page fault.
6048  */
6049 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
6050 				    unsigned long address, unsigned int flags,
6051 				    vm_fault_t ret)
6052 {
6053 	bool major;
6054 
6055 	/* Incomplete faults will be accounted upon completion. */
6056 	if (ret & VM_FAULT_RETRY)
6057 		return;
6058 
6059 	/*
6060 	 * To preserve the behavior of older kernels, PGFAULT counters record
6061 	 * both successful and failed faults, as opposed to perf counters,
6062 	 * which ignore failed cases.
6063 	 */
6064 	count_vm_event(PGFAULT);
6065 	count_memcg_event_mm(mm, PGFAULT);
6066 
6067 	/*
6068 	 * Do not account for unsuccessful faults (e.g. when the address wasn't
6069 	 * valid).  That includes arch_vma_access_permitted() failing before
6070 	 * reaching here. So this is not a "this many hardware page faults"
6071 	 * counter.  We should use the hw profiling for that.
6072 	 */
6073 	if (ret & VM_FAULT_ERROR)
6074 		return;
6075 
6076 	/*
6077 	 * We define the fault as a major fault when the final successful fault
6078 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
6079 	 * handle it immediately previously).
6080 	 */
6081 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
6082 
6083 	if (major)
6084 		current->maj_flt++;
6085 	else
6086 		current->min_flt++;
6087 
6088 	/*
6089 	 * If the fault is done for GUP, regs will be NULL.  We only do the
6090 	 * accounting for the per thread fault counters who triggered the
6091 	 * fault, and we skip the perf event updates.
6092 	 */
6093 	if (!regs)
6094 		return;
6095 
6096 	if (major)
6097 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
6098 	else
6099 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
6100 }
6101 
6102 #ifdef CONFIG_LRU_GEN
6103 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6104 {
6105 	/* the LRU algorithm only applies to accesses with recency */
6106 	current->in_lru_fault = vma_has_recency(vma);
6107 }
6108 
6109 static void lru_gen_exit_fault(void)
6110 {
6111 	current->in_lru_fault = false;
6112 }
6113 #else
6114 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6115 {
6116 }
6117 
6118 static void lru_gen_exit_fault(void)
6119 {
6120 }
6121 #endif /* CONFIG_LRU_GEN */
6122 
6123 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
6124 				       unsigned int *flags)
6125 {
6126 	if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
6127 		if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
6128 			return VM_FAULT_SIGSEGV;
6129 		/*
6130 		 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
6131 		 * just treat it like an ordinary read-fault otherwise.
6132 		 */
6133 		if (!is_cow_mapping(vma->vm_flags))
6134 			*flags &= ~FAULT_FLAG_UNSHARE;
6135 	} else if (*flags & FAULT_FLAG_WRITE) {
6136 		/* Write faults on read-only mappings are impossible ... */
6137 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
6138 			return VM_FAULT_SIGSEGV;
6139 		/* ... and FOLL_FORCE only applies to COW mappings. */
6140 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
6141 				 !is_cow_mapping(vma->vm_flags)))
6142 			return VM_FAULT_SIGSEGV;
6143 	}
6144 #ifdef CONFIG_PER_VMA_LOCK
6145 	/*
6146 	 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
6147 	 * the assumption that the lock is dropped on VM_FAULT_RETRY.
6148 	 */
6149 	if (WARN_ON_ONCE((*flags &
6150 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
6151 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
6152 		return VM_FAULT_SIGSEGV;
6153 #endif
6154 
6155 	return 0;
6156 }
6157 
6158 /*
6159  * By the time we get here, we already hold either the VMA lock or the
6160  * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
6161  *
6162  * The mmap_lock may have been released depending on flags and our
6163  * return value.  See filemap_fault() and __folio_lock_or_retry().
6164  */
6165 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6166 			   unsigned int flags, struct pt_regs *regs)
6167 {
6168 	/* If the fault handler drops the mmap_lock, vma may be freed */
6169 	struct mm_struct *mm = vma->vm_mm;
6170 	vm_fault_t ret;
6171 	bool is_droppable;
6172 
6173 	__set_current_state(TASK_RUNNING);
6174 
6175 	ret = sanitize_fault_flags(vma, &flags);
6176 	if (ret)
6177 		goto out;
6178 
6179 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6180 					    flags & FAULT_FLAG_INSTRUCTION,
6181 					    flags & FAULT_FLAG_REMOTE)) {
6182 		ret = VM_FAULT_SIGSEGV;
6183 		goto out;
6184 	}
6185 
6186 	is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
6187 
6188 	/*
6189 	 * Enable the memcg OOM handling for faults triggered in user
6190 	 * space.  Kernel faults are handled more gracefully.
6191 	 */
6192 	if (flags & FAULT_FLAG_USER)
6193 		mem_cgroup_enter_user_fault();
6194 
6195 	lru_gen_enter_fault(vma);
6196 
6197 	if (unlikely(is_vm_hugetlb_page(vma)))
6198 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6199 	else
6200 		ret = __handle_mm_fault(vma, address, flags);
6201 
6202 	/*
6203 	 * Warning: It is no longer safe to dereference vma-> after this point,
6204 	 * because mmap_lock might have been dropped by __handle_mm_fault(), so
6205 	 * vma might be destroyed from underneath us.
6206 	 */
6207 
6208 	lru_gen_exit_fault();
6209 
6210 	/* If the mapping is droppable, then errors due to OOM aren't fatal. */
6211 	if (is_droppable)
6212 		ret &= ~VM_FAULT_OOM;
6213 
6214 	if (flags & FAULT_FLAG_USER) {
6215 		mem_cgroup_exit_user_fault();
6216 		/*
6217 		 * The task may have entered a memcg OOM situation but
6218 		 * if the allocation error was handled gracefully (no
6219 		 * VM_FAULT_OOM), there is no need to kill anything.
6220 		 * Just clean up the OOM state peacefully.
6221 		 */
6222 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6223 			mem_cgroup_oom_synchronize(false);
6224 	}
6225 out:
6226 	mm_account_fault(mm, regs, address, flags, ret);
6227 
6228 	return ret;
6229 }
6230 EXPORT_SYMBOL_GPL(handle_mm_fault);
6231 
6232 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA
6233 #include <linux/extable.h>
6234 
6235 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6236 {
6237 	if (likely(mmap_read_trylock(mm)))
6238 		return true;
6239 
6240 	if (regs && !user_mode(regs)) {
6241 		unsigned long ip = exception_ip(regs);
6242 		if (!search_exception_tables(ip))
6243 			return false;
6244 	}
6245 
6246 	return !mmap_read_lock_killable(mm);
6247 }
6248 
6249 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
6250 {
6251 	/*
6252 	 * We don't have this operation yet.
6253 	 *
6254 	 * It should be easy enough to do: it's basically a
6255 	 *    atomic_long_try_cmpxchg_acquire()
6256 	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
6257 	 * it also needs the proper lockdep magic etc.
6258 	 */
6259 	return false;
6260 }
6261 
6262 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6263 {
6264 	mmap_read_unlock(mm);
6265 	if (regs && !user_mode(regs)) {
6266 		unsigned long ip = exception_ip(regs);
6267 		if (!search_exception_tables(ip))
6268 			return false;
6269 	}
6270 	return !mmap_write_lock_killable(mm);
6271 }
6272 
6273 /*
6274  * Helper for page fault handling.
6275  *
6276  * This is kind of equivalent to "mmap_read_lock()" followed
6277  * by "find_extend_vma()", except it's a lot more careful about
6278  * the locking (and will drop the lock on failure).
6279  *
6280  * For example, if we have a kernel bug that causes a page
6281  * fault, we don't want to just use mmap_read_lock() to get
6282  * the mm lock, because that would deadlock if the bug were
6283  * to happen while we're holding the mm lock for writing.
6284  *
6285  * So this checks the exception tables on kernel faults in
6286  * order to only do this all for instructions that are actually
6287  * expected to fault.
6288  *
6289  * We can also actually take the mm lock for writing if we
6290  * need to extend the vma, which helps the VM layer a lot.
6291  */
6292 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
6293 			unsigned long addr, struct pt_regs *regs)
6294 {
6295 	struct vm_area_struct *vma;
6296 
6297 	if (!get_mmap_lock_carefully(mm, regs))
6298 		return NULL;
6299 
6300 	vma = find_vma(mm, addr);
6301 	if (likely(vma && (vma->vm_start <= addr)))
6302 		return vma;
6303 
6304 	/*
6305 	 * Well, dang. We might still be successful, but only
6306 	 * if we can extend a vma to do so.
6307 	 */
6308 	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
6309 		mmap_read_unlock(mm);
6310 		return NULL;
6311 	}
6312 
6313 	/*
6314 	 * We can try to upgrade the mmap lock atomically,
6315 	 * in which case we can continue to use the vma
6316 	 * we already looked up.
6317 	 *
6318 	 * Otherwise we'll have to drop the mmap lock and
6319 	 * re-take it, and also look up the vma again,
6320 	 * re-checking it.
6321 	 */
6322 	if (!mmap_upgrade_trylock(mm)) {
6323 		if (!upgrade_mmap_lock_carefully(mm, regs))
6324 			return NULL;
6325 
6326 		vma = find_vma(mm, addr);
6327 		if (!vma)
6328 			goto fail;
6329 		if (vma->vm_start <= addr)
6330 			goto success;
6331 		if (!(vma->vm_flags & VM_GROWSDOWN))
6332 			goto fail;
6333 	}
6334 
6335 	if (expand_stack_locked(vma, addr))
6336 		goto fail;
6337 
6338 success:
6339 	mmap_write_downgrade(mm);
6340 	return vma;
6341 
6342 fail:
6343 	mmap_write_unlock(mm);
6344 	return NULL;
6345 }
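
/*
 * Rough usage sketch as seen from an architecture fault handler (the real
 * call sites live in arch code, e.g. arch/x86/mm/fault.c; details vary):
 *
 *	vma = lock_mm_and_find_vma(mm, address, regs);
 *	if (!vma)
 *		return;		// signal/oops path, lock already dropped
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	// unless VM_FAULT_RETRY/VM_FAULT_COMPLETED was returned, the caller
 *	// still holds mmap_lock for read and must release it
 */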
6346 #endif
6347 
6348 #ifdef CONFIG_PER_VMA_LOCK
6349 /*
6350  * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
6351  * stable and not isolated. If the VMA is not found or is being modified the
6352  * function returns NULL.
6353  */
6354 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
6355 					  unsigned long address)
6356 {
6357 	MA_STATE(mas, &mm->mm_mt, address, address);
6358 	struct vm_area_struct *vma;
6359 
6360 	rcu_read_lock();
6361 retry:
6362 	vma = mas_walk(&mas);
6363 	if (!vma)
6364 		goto inval;
6365 
6366 	if (!vma_start_read(vma))
6367 		goto inval;
6368 
6369 	/* Check if the VMA got isolated after we found it */
6370 	if (vma->detached) {
6371 		vma_end_read(vma);
6372 		count_vm_vma_lock_event(VMA_LOCK_MISS);
6373 		/* The area was replaced with another one */
6374 		goto retry;
6375 	}
6376 	/*
6377 	 * At this point, we have a stable reference to a VMA: The VMA is
6378 	 * locked and we know it hasn't already been isolated.
6379 	 * From here on, we can access the VMA without worrying about which
6380 	 * fields are accessible for RCU readers.
6381 	 */
6382 
6383 	/* Check since vm_start/vm_end might change before we lock the VMA */
6384 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6385 		goto inval_end_read;
6386 
6387 	rcu_read_unlock();
6388 	return vma;
6389 
6390 inval_end_read:
6391 	vma_end_read(vma);
6392 inval:
6393 	rcu_read_unlock();
6394 	count_vm_vma_lock_event(VMA_LOCK_ABORT);
6395 	return NULL;
6396 }
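
/*
 * Rough usage sketch of the per-VMA lock fast path in an architecture fault
 * handler (details vary per arch):
 *
 *	vma = lock_vma_under_rcu(mm, address);
 *	if (vma) {
 *		fault = handle_mm_fault(vma, address,
 *					flags | FAULT_FLAG_VMA_LOCK, regs);
 *		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
 *			vma_end_read(vma);
 *	}
 *	// otherwise fall back to lock_mm_and_find_vma() and the mmap_lock path
 */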
6397 #endif /* CONFIG_PER_VMA_LOCK */
6398 
6399 #ifndef __PAGETABLE_P4D_FOLDED
6400 /*
6401  * Allocate p4d page table.
6402  * We've already handled the fast-path in-line.
6403  */
6404 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6405 {
6406 	p4d_t *new = p4d_alloc_one(mm, address);
6407 	if (!new)
6408 		return -ENOMEM;
6409 
6410 	spin_lock(&mm->page_table_lock);
6411 	if (pgd_present(*pgd)) {	/* Another has populated it */
6412 		p4d_free(mm, new);
6413 	} else {
6414 		smp_wmb(); /* See comment in pmd_install() */
6415 		pgd_populate(mm, pgd, new);
6416 	}
6417 	spin_unlock(&mm->page_table_lock);
6418 	return 0;
6419 }
6420 #endif /* __PAGETABLE_P4D_FOLDED */
6421 
6422 #ifndef __PAGETABLE_PUD_FOLDED
6423 /*
6424  * Allocate page upper directory.
6425  * We've already handled the fast-path in-line.
6426  */
6427 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6428 {
6429 	pud_t *new = pud_alloc_one(mm, address);
6430 	if (!new)
6431 		return -ENOMEM;
6432 
6433 	spin_lock(&mm->page_table_lock);
6434 	if (!p4d_present(*p4d)) {
6435 		mm_inc_nr_puds(mm);
6436 		smp_wmb(); /* See comment in pmd_install() */
6437 		p4d_populate(mm, p4d, new);
6438 	} else	/* Another has populated it */
6439 		pud_free(mm, new);
6440 	spin_unlock(&mm->page_table_lock);
6441 	return 0;
6442 }
6443 #endif /* __PAGETABLE_PUD_FOLDED */
6444 
6445 #ifndef __PAGETABLE_PMD_FOLDED
6446 /*
6447  * Allocate page middle directory.
6448  * We've already handled the fast-path in-line.
6449  */
6450 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6451 {
6452 	spinlock_t *ptl;
6453 	pmd_t *new = pmd_alloc_one(mm, address);
6454 	if (!new)
6455 		return -ENOMEM;
6456 
6457 	ptl = pud_lock(mm, pud);
6458 	if (!pud_present(*pud)) {
6459 		mm_inc_nr_pmds(mm);
6460 		smp_wmb(); /* See comment in pmd_install() */
6461 		pud_populate(mm, pud, new);
6462 	} else {	/* Another has populated it */
6463 		pmd_free(mm, new);
6464 	}
6465 	spin_unlock(ptl);
6466 	return 0;
6467 }
6468 #endif /* __PAGETABLE_PMD_FOLDED */
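/*
 * Illustrative sketch (not built): the "fast path handled in-line" that the
 * comments above refer to is the wrapper in <linux/mm.h>, which only drops
 * into __pmd_alloc() (and its p4d/pud siblings) when the upper-level entry
 * is still empty.  example_pmd_alloc() is a hypothetical rendition of it.
 */
#if 0
static inline pmd_t *example_pmd_alloc(struct mm_struct *mm, pud_t *pud,
					unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
		NULL : pmd_offset(pud, address);
}
#endif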
6469 
6470 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
6471 				     spinlock_t *lock, pte_t *ptep,
6472 				     pgprot_t pgprot, unsigned long pfn_base,
6473 				     unsigned long addr_mask, bool writable,
6474 				     bool special)
6475 {
6476 	args->lock = lock;
6477 	args->ptep = ptep;
6478 	args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
6479 	args->pgprot = pgprot;
6480 	args->writable = writable;
6481 	args->special = special;
6482 }
6483 
6484 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
6485 {
6486 #ifdef CONFIG_LOCKDEP
6487 	struct file *file = vma->vm_file;
6488 	struct address_space *mapping = file ? file->f_mapping : NULL;
6489 
6490 	if (mapping)
6491 		lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
6492 			       lockdep_is_held(&vma->vm_mm->mmap_lock));
6493 	else
6494 		lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
6495 #endif
6496 }
6497 
6498 /**
6499  * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
6500  * @args: Pointer to struct @follow_pfnmap_args
6501  *
6502  * The caller needs to set up args->vma and args->address to point to the
6503  * virtual address that is the target of the lookup.  On a successful return,
6504  * the results will be stored in the other output fields.
6505  *
6506  * After the caller has finished using the fields, it must invoke
6507  * follow_pfnmap_end() to properly release the locks and resources taken
6508  * by the lookup.
6509  *
6510  * Between the start() and end() calls, the results in @args are valid
6511  * because the proper locks are held.  After end() is called, none of the
6512  * fields in @follow_pfnmap_args may be accessed any further.  Any later
6513  * use of that information requires the caller to synchronize properly
6514  * with page table updates, otherwise it can create a
6515  * security bug.
6516  *
6517  * If the PTE maps a refcounted page, callers are responsible for protecting
6518  * against invalidation with MMU notifiers; otherwise access to the PFN at
6519  * a later point in time can trigger use-after-free.
6520  *
6521  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
6522  * should be taken for read, and it cannot be released
6523  * before end() is invoked.
6524  *
6525  * This function must not be used to modify PTE content.
6526  *
6527  * Return: zero on success, negative otherwise.
6528  */
6529 int follow_pfnmap_start(struct follow_pfnmap_args *args)
6530 {
6531 	struct vm_area_struct *vma = args->vma;
6532 	unsigned long address = args->address;
6533 	struct mm_struct *mm = vma->vm_mm;
6534 	spinlock_t *lock;
6535 	pgd_t *pgdp;
6536 	p4d_t *p4dp, p4d;
6537 	pud_t *pudp, pud;
6538 	pmd_t *pmdp, pmd;
6539 	pte_t *ptep, pte;
6540 
6541 	pfnmap_lockdep_assert(vma);
6542 
6543 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6544 		goto out;
6545 
6546 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6547 		goto out;
6548 retry:
6549 	pgdp = pgd_offset(mm, address);
6550 	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
6551 		goto out;
6552 
6553 	p4dp = p4d_offset(pgdp, address);
6554 	p4d = READ_ONCE(*p4dp);
6555 	if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
6556 		goto out;
6557 
6558 	pudp = pud_offset(p4dp, address);
6559 	pud = READ_ONCE(*pudp);
6560 	if (pud_none(pud))
6561 		goto out;
6562 	if (pud_leaf(pud)) {
6563 		lock = pud_lock(mm, pudp);
6564 		if (!unlikely(pud_leaf(pud))) {
6565 			spin_unlock(lock);
6566 			goto retry;
6567 		}
6568 		pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
6569 				  pud_pfn(pud), PUD_MASK, pud_write(pud),
6570 				  pud_special(pud));
6571 		return 0;
6572 	}
6573 
6574 	pmdp = pmd_offset(pudp, address);
6575 	pmd = pmdp_get_lockless(pmdp);
6576 	if (pmd_leaf(pmd)) {
6577 		lock = pmd_lock(mm, pmdp);
6578 		if (!unlikely(pmd_leaf(pmd))) {
6579 			spin_unlock(lock);
6580 			goto retry;
6581 		}
6582 		pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
6583 				  pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
6584 				  pmd_special(pmd));
6585 		return 0;
6586 	}
6587 
6588 	ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
6589 	if (!ptep)
6590 		goto out;
6591 	pte = ptep_get(ptep);
6592 	if (!pte_present(pte))
6593 		goto unlock;
6594 	pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
6595 			  pte_pfn(pte), PAGE_MASK, pte_write(pte),
6596 			  pte_special(pte));
6597 	return 0;
6598 unlock:
6599 	pte_unmap_unlock(ptep, lock);
6600 out:
6601 	return -EINVAL;
6602 }
6603 EXPORT_SYMBOL_GPL(follow_pfnmap_start);
6604 
6605 /**
6606  * follow_pfnmap_end(): End a follow_pfnmap_start() process
6607  * @args: Pointer to struct @follow_pfnmap_args
6608  *
6609  * Must be used in pair with follow_pfnmap_start().  See the start() function
6610  * above for more information.
6611  */
6612 void follow_pfnmap_end(struct follow_pfnmap_args *args)
6613 {
6614 	if (args->lock)
6615 		spin_unlock(args->lock);
6616 	if (args->ptep)
6617 		pte_unmap(args->ptep);
6618 }
6619 EXPORT_SYMBOL_GPL(follow_pfnmap_end);
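/*
 * Illustrative sketch (not built): the minimal calling convention for the
 * follow_pfnmap_start()/follow_pfnmap_end() pair.  example_peek_pfn() is a
 * hypothetical helper; generic_access_phys() below is a real in-tree user.
 */
#if 0
static int example_peek_pfn(struct vm_area_struct *vma, unsigned long addr,
			    unsigned long *pfn)
{
	struct follow_pfnmap_args args = { .vma = vma, .address = addr };

	if (follow_pfnmap_start(&args))
		return -EINVAL;
	*pfn = args.pfn;	/* only stable while the lookup is held */
	follow_pfnmap_end(&args);
	return 0;
}
#endif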
6620 
6621 #ifdef CONFIG_HAVE_IOREMAP_PROT
6622 /**
6623  * generic_access_phys - generic implementation for iomem mmap access
6624  * @vma: the vma to access
6625  * @addr: userspace address, not relative offset within @vma
6626  * @buf: buffer to read/write
6627  * @len: length of transfer
6628  * @write: set to FOLL_WRITE when writing, otherwise reading
6629  *
6630  * This is a generic implementation for &vm_operations_struct.access for an
6631  * iomem mapping. This callback is used by access_process_vm() when the @vma is
6632  * not page based.
6633  */
6634 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6635 			void *buf, int len, int write)
6636 {
6637 	resource_size_t phys_addr;
6638 	unsigned long prot = 0;
6639 	void __iomem *maddr;
6640 	int offset = offset_in_page(addr);
6641 	int ret = -EINVAL;
6642 	bool writable;
6643 	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
6644 
6645 retry:
6646 	if (follow_pfnmap_start(&args))
6647 		return -EINVAL;
6648 	prot = pgprot_val(args.pgprot);
6649 	phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
6650 	writable = args.writable;
6651 	follow_pfnmap_end(&args);
6652 
6653 	if ((write & FOLL_WRITE) && !writable)
6654 		return -EINVAL;
6655 
6656 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6657 	if (!maddr)
6658 		return -ENOMEM;
6659 
6660 	if (follow_pfnmap_start(&args))
6661 		goto out_unmap;
6662 
6663 	if ((prot != pgprot_val(args.pgprot)) ||
6664 	    (phys_addr != (args.pfn << PAGE_SHIFT)) ||
6665 	    (writable != args.writable)) {
6666 		follow_pfnmap_end(&args);
6667 		iounmap(maddr);
6668 		goto retry;
6669 	}
6670 
6671 	if (write)
6672 		memcpy_toio(maddr + offset, buf, len);
6673 	else
6674 		memcpy_fromio(buf, maddr + offset, len);
6675 	ret = len;
6676 	follow_pfnmap_end(&args);
6677 out_unmap:
6678 	iounmap(maddr);
6679 
6680 	return ret;
6681 }
6682 EXPORT_SYMBOL_GPL(generic_access_phys);
6683 #endif
6684 
6685 /*
6686  * Access another process' address space as given in mm.
6687  */
6688 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
6689 			      void *buf, int len, unsigned int gup_flags)
6690 {
6691 	void *old_buf = buf;
6692 	int write = gup_flags & FOLL_WRITE;
6693 
6694 	if (mmap_read_lock_killable(mm))
6695 		return 0;
6696 
6697 	/* Untag the address before looking up the VMA */
6698 	addr = untagged_addr_remote(mm, addr);
6699 
6700 	/* Avoid triggering the temporary warning in __get_user_pages */
6701 	if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
6702 		return 0;
6703 
6704 	/* ignore errors, just check how much was successfully transferred */
6705 	while (len) {
6706 		int bytes, offset;
6707 		void *maddr;
6708 		struct vm_area_struct *vma = NULL;
6709 		struct page *page = get_user_page_vma_remote(mm, addr,
6710 							     gup_flags, &vma);
6711 
6712 		if (IS_ERR(page)) {
6713 			/* We might need to expand the stack to access it */
6714 			vma = vma_lookup(mm, addr);
6715 			if (!vma) {
6716 				vma = expand_stack(mm, addr);
6717 
6718 				/* mmap_lock was dropped on failure */
6719 				if (!vma)
6720 					return buf - old_buf;
6721 
6722 				/* Try again if stack expansion worked */
6723 				continue;
6724 			}
6725 
6726 			/*
6727 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
6728 			 * we can access using slightly different code.
6729 			 */
6730 			bytes = 0;
6731 #ifdef CONFIG_HAVE_IOREMAP_PROT
6732 			if (vma->vm_ops && vma->vm_ops->access)
6733 				bytes = vma->vm_ops->access(vma, addr, buf,
6734 							    len, write);
6735 #endif
6736 			if (bytes <= 0)
6737 				break;
6738 		} else {
6739 			bytes = len;
6740 			offset = addr & (PAGE_SIZE-1);
6741 			if (bytes > PAGE_SIZE-offset)
6742 				bytes = PAGE_SIZE-offset;
6743 
6744 			maddr = kmap_local_page(page);
6745 			if (write) {
6746 				copy_to_user_page(vma, page, addr,
6747 						  maddr + offset, buf, bytes);
6748 				set_page_dirty_lock(page);
6749 			} else {
6750 				copy_from_user_page(vma, page, addr,
6751 						    buf, maddr + offset, bytes);
6752 			}
6753 			unmap_and_put_page(page, maddr);
6754 		}
6755 		len -= bytes;
6756 		buf += bytes;
6757 		addr += bytes;
6758 	}
6759 	mmap_read_unlock(mm);
6760 
6761 	return buf - old_buf;
6762 }
6763 
6764 /**
6765  * access_remote_vm - access another process' address space
6766  * @mm:		the mm_struct of the target address space
6767  * @addr:	start address to access
6768  * @buf:	source or destination buffer
6769  * @len:	number of bytes to transfer
6770  * @gup_flags:	flags modifying lookup behaviour
6771  *
6772  * The caller must hold a reference on @mm.
6773  *
6774  * Return: number of bytes copied from source to destination.
6775  */
6776 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6777 		void *buf, int len, unsigned int gup_flags)
6778 {
6779 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
6780 }
6781 
6782 /*
6783  * Access another process' address space.
6784  * Source/target buffer must be in kernel space.
6785  * Do not walk the page table directly; use get_user_pages().
6786  */
6787 int access_process_vm(struct task_struct *tsk, unsigned long addr,
6788 		void *buf, int len, unsigned int gup_flags)
6789 {
6790 	struct mm_struct *mm;
6791 	int ret;
6792 
6793 	mm = get_task_mm(tsk);
6794 	if (!mm)
6795 		return 0;
6796 
6797 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
6798 
6799 	mmput(mm);
6800 
6801 	return ret;
6802 }
6803 EXPORT_SYMBOL_GPL(access_process_vm);
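/*
 * Illustrative sketch (not built): a ptrace-style read of another task's
 * memory via access_process_vm().  example_peek_remote() is hypothetical;
 * ptrace and /proc/<pid>/mem are the real users of this interface.
 */
#if 0
static int example_peek_remote(struct task_struct *tsk, unsigned long addr,
			       void *buf, int len)
{
	/* Partial copies are possible; callers must check the byte count. */
	int copied = access_process_vm(tsk, addr, buf, len, FOLL_FORCE);

	return copied == len ? 0 : -EIO;
}
#endif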
6804 
6805 /*
6806  * Print the name of a VMA.
6807  */
6808 void print_vma_addr(char *prefix, unsigned long ip)
6809 {
6810 	struct mm_struct *mm = current->mm;
6811 	struct vm_area_struct *vma;
6812 
6813 	/*
6814 	 * We might be running from an atomic context, so we cannot sleep.
6815 	 */
6816 	if (!mmap_read_trylock(mm))
6817 		return;
6818 
6819 	vma = vma_lookup(mm, ip);
6820 	if (vma && vma->vm_file) {
6821 		struct file *f = vma->vm_file;
6822 		ip -= vma->vm_start;
6823 		ip += vma->vm_pgoff << PAGE_SHIFT;
6824 		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
6825 				vma->vm_start,
6826 				vma->vm_end - vma->vm_start);
6827 	}
6828 	mmap_read_unlock(mm);
6829 }
6830 
6831 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6832 void __might_fault(const char *file, int line)
6833 {
6834 	if (pagefault_disabled())
6835 		return;
6836 	__might_sleep(file, line);
6837 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6838 	if (current->mm)
6839 		might_lock_read(&current->mm->mmap_lock);
6840 #endif
6841 }
6842 EXPORT_SYMBOL(__might_fault);
6843 #endif
6844 
6845 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
6846 /*
6847  * Process all subpages of the specified huge page with the specified
6848  * operation.  The target subpage will be processed last to keep its
6849  * cache lines hot.
6850  */
6851 static inline int process_huge_page(
6852 	unsigned long addr_hint, unsigned int nr_pages,
6853 	int (*process_subpage)(unsigned long addr, int idx, void *arg),
6854 	void *arg)
6855 {
6856 	int i, n, base, l, ret;
6857 	unsigned long addr = addr_hint &
6858 		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
6859 
6860 	/* Process target subpage last to keep its cache lines hot */
6861 	might_sleep();
6862 	n = (addr_hint - addr) / PAGE_SIZE;
6863 	if (2 * n <= nr_pages) {
6864 		/* If target subpage in first half of huge page */
6865 		base = 0;
6866 		l = n;
6867 		/* Process subpages at the end of huge page */
6868 		for (i = nr_pages - 1; i >= 2 * n; i--) {
6869 			cond_resched();
6870 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
6871 			if (ret)
6872 				return ret;
6873 		}
6874 	} else {
6875 		/* If target subpage in second half of huge page */
6876 		base = nr_pages - 2 * (nr_pages - n);
6877 		l = nr_pages - n;
6878 		/* Process subpages at the beginning of the huge page */
6879 		for (i = 0; i < base; i++) {
6880 			cond_resched();
6881 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
6882 			if (ret)
6883 				return ret;
6884 		}
6885 	}
6886 	/*
6887 	 * Process the remaining subpages in a left-right-left-right pattern
6888 	 * towards the target subpage
6889 	 */
6890 	for (i = 0; i < l; i++) {
6891 		int left_idx = base + i;
6892 		int right_idx = base + 2 * l - 1 - i;
6893 
6894 		cond_resched();
6895 		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
6896 		if (ret)
6897 			return ret;
6898 		cond_resched();
6899 		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
6900 		if (ret)
6901 			return ret;
6902 	}
6903 	return 0;
6904 }
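/*
 * Worked example (illustrative): with nr_pages == 8 and addr_hint pointing
 * at subpage 5, the second branch above yields base == 2 and l == 3, so the
 * subpages are processed in the order 0, 1, 2, 7, 3, 6, 4, 5 -- the target
 * subpage comes last so its cache lines are hottest when the caller returns.
 */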
6905 
6906 static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
6907 				unsigned int nr_pages)
6908 {
6909 	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
6910 	int i;
6911 
6912 	might_sleep();
6913 	for (i = 0; i < nr_pages; i++) {
6914 		cond_resched();
6915 		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
6916 	}
6917 }
6918 
6919 static int clear_subpage(unsigned long addr, int idx, void *arg)
6920 {
6921 	struct folio *folio = arg;
6922 
6923 	clear_user_highpage(folio_page(folio, idx), addr);
6924 	return 0;
6925 }
6926 
6927 /**
6928  * folio_zero_user - Zero a folio which will be mapped to userspace.
6929  * @folio: The folio to zero.
6930  * @addr_hint: The address that will be accessed, or the base address if unclear.
6931  */
6932 void folio_zero_user(struct folio *folio, unsigned long addr_hint)
6933 {
6934 	unsigned int nr_pages = folio_nr_pages(folio);
6935 
6936 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
6937 		clear_gigantic_page(folio, addr_hint, nr_pages);
6938 	else
6939 		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
6940 }
6941 
6942 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
6943 				   unsigned long addr_hint,
6944 				   struct vm_area_struct *vma,
6945 				   unsigned int nr_pages)
6946 {
6947 	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
6948 	struct page *dst_page;
6949 	struct page *src_page;
6950 	int i;
6951 
6952 	for (i = 0; i < nr_pages; i++) {
6953 		dst_page = folio_page(dst, i);
6954 		src_page = folio_page(src, i);
6955 
6956 		cond_resched();
6957 		if (copy_mc_user_highpage(dst_page, src_page,
6958 					  addr + i*PAGE_SIZE, vma))
6959 			return -EHWPOISON;
6960 	}
6961 	return 0;
6962 }
6963 
6964 struct copy_subpage_arg {
6965 	struct folio *dst;
6966 	struct folio *src;
6967 	struct vm_area_struct *vma;
6968 };
6969 
6970 static int copy_subpage(unsigned long addr, int idx, void *arg)
6971 {
6972 	struct copy_subpage_arg *copy_arg = arg;
6973 	struct page *dst = folio_page(copy_arg->dst, idx);
6974 	struct page *src = folio_page(copy_arg->src, idx);
6975 
6976 	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
6977 		return -EHWPOISON;
6978 	return 0;
6979 }
6980 
6981 int copy_user_large_folio(struct folio *dst, struct folio *src,
6982 			  unsigned long addr_hint, struct vm_area_struct *vma)
6983 {
6984 	unsigned int nr_pages = folio_nr_pages(dst);
6985 	struct copy_subpage_arg arg = {
6986 		.dst = dst,
6987 		.src = src,
6988 		.vma = vma,
6989 	};
6990 
6991 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
6992 		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
6993 
6994 	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
6995 }
6996 
6997 long copy_folio_from_user(struct folio *dst_folio,
6998 			   const void __user *usr_src,
6999 			   bool allow_pagefault)
7000 {
7001 	void *kaddr;
7002 	unsigned long i, rc = 0;
7003 	unsigned int nr_pages = folio_nr_pages(dst_folio);
7004 	unsigned long ret_val = nr_pages * PAGE_SIZE;
7005 	struct page *subpage;
7006 
7007 	for (i = 0; i < nr_pages; i++) {
7008 		subpage = folio_page(dst_folio, i);
7009 		kaddr = kmap_local_page(subpage);
7010 		if (!allow_pagefault)
7011 			pagefault_disable();
7012 		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
7013 		if (!allow_pagefault)
7014 			pagefault_enable();
7015 		kunmap_local(kaddr);
7016 
7017 		ret_val -= (PAGE_SIZE - rc);
7018 		if (rc)
7019 			break;
7020 
7021 		flush_dcache_page(subpage);
7022 
7023 		cond_resched();
7024 	}
7025 	return ret_val;
7026 }
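/*
 * Illustrative sketch (not built): like copy_from_user(), the return value
 * of copy_folio_from_user() is the number of bytes that could NOT be copied,
 * so zero means complete success.  example_fill_folio() is hypothetical.
 */
#if 0
static int example_fill_folio(struct folio *folio, const void __user *src)
{
	if (copy_folio_from_user(folio, src, true))
		return -EFAULT;		/* treat a partial copy as failure */
	return 0;
}
#endif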
7027 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
7028 
7029 #if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
7030 
7031 static struct kmem_cache *page_ptl_cachep;
7032 
7033 void __init ptlock_cache_init(void)
7034 {
7035 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
7036 			SLAB_PANIC, NULL);
7037 }
7038 
7039 bool ptlock_alloc(struct ptdesc *ptdesc)
7040 {
7041 	spinlock_t *ptl;
7042 
7043 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
7044 	if (!ptl)
7045 		return false;
7046 	ptdesc->ptl = ptl;
7047 	return true;
7048 }
7049 
7050 void ptlock_free(struct ptdesc *ptdesc)
7051 {
7052 	if (ptdesc->ptl)
7053 		kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
7054 }
7055 #endif
7056 
7057 void vma_pgtable_walk_begin(struct vm_area_struct *vma)
7058 {
7059 	if (is_vm_hugetlb_page(vma))
7060 		hugetlb_vma_lock_read(vma);
7061 }
7062 
7063 void vma_pgtable_walk_end(struct vm_area_struct *vma)
7064 {
7065 	if (is_vm_hugetlb_page(vma))
7066 		hugetlb_vma_unlock_read(vma);
7067 }
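/*
 * Illustrative sketch (not built): vma_pgtable_walk_begin()/_end() bracket a
 * page table walk so that hugetlb VMAs are traversed under the hugetlb VMA
 * lock.  example_walk_vma() is a hypothetical caller.
 */
#if 0
static void example_walk_vma(struct vm_area_struct *vma)
{
	vma_pgtable_walk_begin(vma);
	/* ... walk the page tables of @vma here ... */
	vma_pgtable_walk_end(vma);
}
#endif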
7068