xref: /linux/mm/memory.c (revision 6bc0987d0b508b3768808efafa1e90041713526b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/numa_balancing.h>
47 #include <linux/sched/task.h>
48 #include <linux/hugetlb.h>
49 #include <linux/mman.h>
50 #include <linux/swap.h>
51 #include <linux/highmem.h>
52 #include <linux/pagemap.h>
53 #include <linux/memremap.h>
54 #include <linux/kmsan.h>
55 #include <linux/ksm.h>
56 #include <linux/rmap.h>
57 #include <linux/export.h>
58 #include <linux/delayacct.h>
59 #include <linux/init.h>
60 #include <linux/writeback.h>
61 #include <linux/memcontrol.h>
62 #include <linux/mmu_notifier.h>
63 #include <linux/leafops.h>
64 #include <linux/elf.h>
65 #include <linux/gfp.h>
66 #include <linux/migrate.h>
67 #include <linux/string.h>
68 #include <linux/shmem_fs.h>
69 #include <linux/memory-tiers.h>
70 #include <linux/debugfs.h>
71 #include <linux/userfaultfd_k.h>
72 #include <linux/dax.h>
73 #include <linux/oom.h>
74 #include <linux/numa.h>
75 #include <linux/perf_event.h>
76 #include <linux/ptrace.h>
77 #include <linux/vmalloc.h>
78 #include <linux/sched/sysctl.h>
79 #include <linux/pgalloc.h>
80 #include <linux/uaccess.h>
81 
82 #include <trace/events/kmem.h>
83 
84 #include <asm/io.h>
85 #include <asm/mmu_context.h>
86 #include <asm/tlb.h>
87 #include <asm/tlbflush.h>
88 
89 #include "pgalloc-track.h"
90 #include "internal.h"
91 #include "swap.h"
92 
93 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
94 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
95 #endif
96 
97 static vm_fault_t do_fault(struct vm_fault *vmf);
98 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
99 static bool vmf_pte_changed(struct vm_fault *vmf);
100 
101 /*
102  * Return true if the original pte was a uffd-wp pte marker (so the pte was
103  * wr-protected).
104  */
105 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
106 {
107 	if (!userfaultfd_wp(vmf->vma))
108 		return false;
109 	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
110 		return false;
111 
112 	return pte_is_uffd_wp_marker(vmf->orig_pte);
113 }
114 
115 /*
116  * Randomize the address space (stacks, mmaps, brk, etc.).
117  *
118  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
119  *   as ancient (libc5 based) binaries can segfault. )
120  */
121 int randomize_va_space __read_mostly =
122 #ifdef CONFIG_COMPAT_BRK
123 					1;
124 #else
125 					2;
126 #endif
127 
128 static const struct ctl_table mmu_sysctl_table[] = {
129 	{
130 		.procname	= "randomize_va_space",
131 		.data		= &randomize_va_space,
132 		.maxlen		= sizeof(int),
133 		.mode		= 0644,
134 		.proc_handler	= proc_dointvec,
135 	},
136 };
137 
138 static int __init init_mm_sysctl(void)
139 {
140 	register_sysctl_init("kernel", mmu_sysctl_table);
141 	return 0;
142 }
143 
144 subsys_initcall(init_mm_sysctl);
145 
146 #ifndef arch_wants_old_prefaulted_pte
147 static inline bool arch_wants_old_prefaulted_pte(void)
148 {
149 	/*
150 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
151 	 * some architectures, even if it's performed in hardware. By
152 	 * default, "false" means prefaulted entries will be 'young'.
153 	 */
154 	return false;
155 }
156 #endif
157 
158 static int __init disable_randmaps(char *s)
159 {
160 	randomize_va_space = 0;
161 	return 1;
162 }
163 __setup("norandmaps", disable_randmaps);
164 
165 unsigned long highest_memmap_pfn __read_mostly;
166 
167 void mm_trace_rss_stat(struct mm_struct *mm, int member)
168 {
169 	trace_rss_stat(mm, member);
170 }
171 
172 /*
173  * Note: this doesn't free the actual pages themselves. That
174  * has been handled earlier when unmapping all the memory regions.
175  */
176 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
177 			   unsigned long addr)
178 {
179 	pgtable_t token = pmd_pgtable(*pmd);
180 	pmd_clear(pmd);
181 	pte_free_tlb(tlb, token, addr);
182 	mm_dec_nr_ptes(tlb->mm);
183 }
184 
185 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
186 				unsigned long addr, unsigned long end,
187 				unsigned long floor, unsigned long ceiling)
188 {
189 	pmd_t *pmd;
190 	unsigned long next;
191 	unsigned long start;
192 
193 	start = addr;
194 	pmd = pmd_offset(pud, addr);
195 	do {
196 		next = pmd_addr_end(addr, end);
197 		if (pmd_none_or_clear_bad(pmd))
198 			continue;
199 		free_pte_range(tlb, pmd, addr);
200 	} while (pmd++, addr = next, addr != end);
201 
202 	start &= PUD_MASK;
203 	if (start < floor)
204 		return;
205 	if (ceiling) {
206 		ceiling &= PUD_MASK;
207 		if (!ceiling)
208 			return;
209 	}
210 	if (end - 1 > ceiling - 1)
211 		return;
212 
213 	pmd = pmd_offset(pud, start);
214 	pud_clear(pud);
215 	pmd_free_tlb(tlb, pmd, start);
216 	mm_dec_nr_pmds(tlb->mm);
217 }
218 
219 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
220 				unsigned long addr, unsigned long end,
221 				unsigned long floor, unsigned long ceiling)
222 {
223 	pud_t *pud;
224 	unsigned long next;
225 	unsigned long start;
226 
227 	start = addr;
228 	pud = pud_offset(p4d, addr);
229 	do {
230 		next = pud_addr_end(addr, end);
231 		if (pud_none_or_clear_bad(pud))
232 			continue;
233 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
234 	} while (pud++, addr = next, addr != end);
235 
236 	start &= P4D_MASK;
237 	if (start < floor)
238 		return;
239 	if (ceiling) {
240 		ceiling &= P4D_MASK;
241 		if (!ceiling)
242 			return;
243 	}
244 	if (end - 1 > ceiling - 1)
245 		return;
246 
247 	pud = pud_offset(p4d, start);
248 	p4d_clear(p4d);
249 	pud_free_tlb(tlb, pud, start);
250 	mm_dec_nr_puds(tlb->mm);
251 }
252 
253 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
254 				unsigned long addr, unsigned long end,
255 				unsigned long floor, unsigned long ceiling)
256 {
257 	p4d_t *p4d;
258 	unsigned long next;
259 	unsigned long start;
260 
261 	start = addr;
262 	p4d = p4d_offset(pgd, addr);
263 	do {
264 		next = p4d_addr_end(addr, end);
265 		if (p4d_none_or_clear_bad(p4d))
266 			continue;
267 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
268 	} while (p4d++, addr = next, addr != end);
269 
270 	start &= PGDIR_MASK;
271 	if (start < floor)
272 		return;
273 	if (ceiling) {
274 		ceiling &= PGDIR_MASK;
275 		if (!ceiling)
276 			return;
277 	}
278 	if (end - 1 > ceiling - 1)
279 		return;
280 
281 	p4d = p4d_offset(pgd, start);
282 	pgd_clear(pgd);
283 	p4d_free_tlb(tlb, p4d, start);
284 }
285 
286 /**
287  * free_pgd_range - Unmap and free page tables in the range
288  * @tlb: the mmu_gather containing pending TLB flush info
289  * @addr: virtual address start
290  * @end: virtual address end
291  * @floor: lowest address boundary
292  * @ceiling: highest address boundary
293  *
294  * This function tears down all user-level page tables in the
295  * specified virtual address range [@addr..@end). It is part of
296  * the memory unmap flow.
297  */
298 void free_pgd_range(struct mmu_gather *tlb,
299 			unsigned long addr, unsigned long end,
300 			unsigned long floor, unsigned long ceiling)
301 {
302 	pgd_t *pgd;
303 	unsigned long next;
304 
305 	/*
306 	 * The next few lines have given us lots of grief...
307 	 *
308 	 * Why are we testing PMD* at this top level?  Because often
309 	 * there will be no work to do at all, and we'd prefer not to
310 	 * go all the way down to the bottom just to discover that.
311 	 *
312 	 * Why all these "- 1"s?  Because 0 represents both the bottom
313 	 * of the address space and the top of it (using -1 for the
314 	 * top wouldn't help much: the masks would do the wrong thing).
315 	 * The rule is that addr 0 and floor 0 refer to the bottom of
316 	 * the address space, but end 0 and ceiling 0 refer to the top
317 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
318 	 * that end 0 case should be mythical).
319 	 *
320 	 * Wherever addr is brought up or ceiling brought down, we must
321 	 * be careful to reject "the opposite 0" before it confuses the
322 	 * subsequent tests.  But what about where end is brought down
323 	 * by PMD_SIZE below? no, end can't go down to 0 there.
324 	 *
325 	 * Whereas we round start (addr) and ceiling down, by different
326 	 * masks at different levels, in order to test whether a table
327 	 * now has no other vmas using it, so can be freed, we don't
328 	 * bother to round floor or end up - the tests don't need that.
329 	 */
330 
331 	addr &= PMD_MASK;
332 	if (addr < floor) {
333 		addr += PMD_SIZE;
334 		if (!addr)
335 			return;
336 	}
337 	if (ceiling) {
338 		ceiling &= PMD_MASK;
339 		if (!ceiling)
340 			return;
341 	}
342 	if (end - 1 > ceiling - 1)
343 		end -= PMD_SIZE;
344 	if (addr > end - 1)
345 		return;
346 	/*
347 	 * We add page table cache pages with PAGE_SIZE,
348 	 * (see pte_free_tlb()), flush the tlb if we need
349 	 */
350 	tlb_change_page_size(tlb, PAGE_SIZE);
351 	pgd = pgd_offset(tlb->mm, addr);
352 	do {
353 		next = pgd_addr_end(addr, end);
354 		if (pgd_none_or_clear_bad(pgd))
355 			continue;
356 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
357 	} while (pgd++, addr = next, addr != end);
358 }
359 
360 /**
361  * free_pgtables() - Free a range of page tables
362  * @tlb: The mmu gather
363  * @unmap: The unmap_desc
364  *
365  * Note: pg_start and pg_end are provided to indicate the absolute range of the
366  * page tables that should be removed.  This can differ from the vma mappings on
367  * some archs that may have mappings that need to be removed outside the vmas.
368  * Note that the prev->vm_end and next->vm_start are often used.
369  *
370  * The vma_end differs from the pg_end when a dup_mmap() failed and the tree has
371  * unrelated data to the mm_struct being torn down.
372  */
373 void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
374 {
375 	struct unlink_vma_file_batch vb;
376 	struct ma_state *mas = unmap->mas;
377 	struct vm_area_struct *vma = unmap->first;
378 
379 	/*
380 	 * Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
381 	 * may be 0.  Underflow is expected in this case.  Otherwise the
382 	 * pagetable end is exclusive.  vma_end is exclusive.  The last vma
383 	 * address should never be larger than the pagetable end.
384 	 */
385 	WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);
386 
387 	tlb_free_vmas(tlb);
388 
389 	do {
390 		unsigned long addr = vma->vm_start;
391 		struct vm_area_struct *next;
392 
393 		next = mas_find(mas, unmap->tree_end - 1);
394 
395 		/*
396 		 * Hide vma from rmap and truncate_pagecache before freeing
397 		 * pgtables
398 		 */
399 		if (unmap->mm_wr_locked)
400 			vma_start_write(vma);
401 		unlink_anon_vmas(vma);
402 
403 		unlink_file_vma_batch_init(&vb);
404 		unlink_file_vma_batch_add(&vb, vma);
405 
406 		/*
407 		 * Optimization: gather nearby vmas into one call down
408 		 */
409 		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
410 			vma = next;
411 			next = mas_find(mas, unmap->tree_end - 1);
412 			if (unmap->mm_wr_locked)
413 				vma_start_write(vma);
414 			unlink_anon_vmas(vma);
415 			unlink_file_vma_batch_add(&vb, vma);
416 		}
417 		unlink_file_vma_batch_final(&vb);
418 
419 		free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start,
420 			       next ? next->vm_start : unmap->pg_end);
421 		vma = next;
422 	} while (vma);
423 }
424 
425 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
426 {
427 	spinlock_t *ptl = pmd_lock(mm, pmd);
428 
429 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
430 		mm_inc_nr_ptes(mm);
431 		/*
432 		 * Ensure all pte setup (eg. pte page lock and page clearing) are
433 		 * visible before the pte is made visible to other CPUs by being
434 		 * put into page tables.
435 		 *
436 		 * The other side of the story is the pointer chasing in the page
437 		 * table walking code (when walking the page table without locking;
438 		 * ie. most of the time). Fortunately, these data accesses consist
439 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
440 		 * being the notable exception) will already guarantee loads are
441 		 * seen in-order. See the alpha page table accessors for the
442 		 * smp_rmb() barriers in page table walking code.
443 		 */
444 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
445 		pmd_populate(mm, pmd, *pte);
446 		*pte = NULL;
447 	}
448 	spin_unlock(ptl);
449 }
450 
451 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
452 {
453 	pgtable_t new = pte_alloc_one(mm);
454 	if (!new)
455 		return -ENOMEM;
456 
457 	pmd_install(mm, pmd, &new);
458 	if (new)
459 		pte_free(mm, new);
460 	return 0;
461 }
462 
463 int __pte_alloc_kernel(pmd_t *pmd)
464 {
465 	pte_t *new = pte_alloc_one_kernel(&init_mm);
466 	if (!new)
467 		return -ENOMEM;
468 
469 	spin_lock(&init_mm.page_table_lock);
470 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
471 		smp_wmb(); /* See comment in pmd_install() */
472 		pmd_populate_kernel(&init_mm, pmd, new);
473 		new = NULL;
474 	}
475 	spin_unlock(&init_mm.page_table_lock);
476 	if (new)
477 		pte_free_kernel(&init_mm, new);
478 	return 0;
479 }
480 
481 static inline void init_rss_vec(int *rss)
482 {
483 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
484 }
485 
486 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
487 {
488 	int i;
489 
490 	for (i = 0; i < NR_MM_COUNTERS; i++)
491 		if (rss[i])
492 			add_mm_counter(mm, i, rss[i]);
493 }
494 
495 static bool is_bad_page_map_ratelimited(void)
496 {
497 	static unsigned long resume;
498 	static unsigned long nr_shown;
499 	static unsigned long nr_unshown;
500 
501 	/*
502 	 * Allow a burst of 60 reports, then keep quiet for that minute;
503 	 * or allow a steady drip of one report per second.
504 	 */
505 	if (nr_shown == 60) {
506 		if (time_before(jiffies, resume)) {
507 			nr_unshown++;
508 			return true;
509 		}
510 		if (nr_unshown) {
511 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
512 				 nr_unshown);
513 			nr_unshown = 0;
514 		}
515 		nr_shown = 0;
516 	}
517 	if (nr_shown++ == 0)
518 		resume = jiffies + 60 * HZ;
519 	return false;
520 }
521 
522 static void __print_bad_page_map_pgtable(struct mm_struct *mm, unsigned long addr)
523 {
524 	unsigned long long pgdv, p4dv, pudv, pmdv;
525 	p4d_t p4d, *p4dp;
526 	pud_t pud, *pudp;
527 	pmd_t pmd, *pmdp;
528 	pgd_t *pgdp;
529 
530 	/*
531 	 * Although this looks like a fully lockless pgtable walk, it is not:
532 	 * see locking requirements for print_bad_page_map().
533 	 */
534 	pgdp = pgd_offset(mm, addr);
535 	pgdv = pgd_val(*pgdp);
536 
537 	if (!pgd_present(*pgdp) || pgd_leaf(*pgdp)) {
538 		pr_alert("pgd:%08llx\n", pgdv);
539 		return;
540 	}
541 
542 	p4dp = p4d_offset(pgdp, addr);
543 	p4d = p4dp_get(p4dp);
544 	p4dv = p4d_val(p4d);
545 
546 	if (!p4d_present(p4d) || p4d_leaf(p4d)) {
547 		pr_alert("pgd:%08llx p4d:%08llx\n", pgdv, p4dv);
548 		return;
549 	}
550 
551 	pudp = pud_offset(p4dp, addr);
552 	pud = pudp_get(pudp);
553 	pudv = pud_val(pud);
554 
555 	if (!pud_present(pud) || pud_leaf(pud)) {
556 		pr_alert("pgd:%08llx p4d:%08llx pud:%08llx\n", pgdv, p4dv, pudv);
557 		return;
558 	}
559 
560 	pmdp = pmd_offset(pudp, addr);
561 	pmd = pmdp_get(pmdp);
562 	pmdv = pmd_val(pmd);
563 
564 	/*
565 	 * Dumping the PTE would be nice, but it's tricky with CONFIG_HIGHPTE,
566 	 * because the table should already be mapped by the caller and
567 	 * doing another map would be bad. print_bad_page_map() should
568 	 * already take care of printing the PTE.
569 	 */
570 	pr_alert("pgd:%08llx p4d:%08llx pud:%08llx pmd:%08llx\n", pgdv,
571 		 p4dv, pudv, pmdv);
572 }
573 
574 /*
575  * This function is called to print an error when a bad page table entry (e.g.,
576  * corrupted page table entry) is found. For example, we might have a
577  * PFN-mapped pte in a region that doesn't allow it.
578  *
579  * The calling function must still handle the error.
580  *
581  * This function must be called during a proper page table walk, as it will
582  * re-walk the page table to dump information: the caller MUST prevent page
583  * table teardown (by holding mmap, vma or rmap lock) and MUST hold the leaf
584  * page table lock.
585  */
586 static void print_bad_page_map(struct vm_area_struct *vma,
587 		unsigned long addr, unsigned long long entry, struct page *page,
588 		enum pgtable_level level)
589 {
590 	struct address_space *mapping;
591 	pgoff_t index;
592 
593 	if (is_bad_page_map_ratelimited())
594 		return;
595 
596 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
597 	index = linear_page_index(vma, addr);
598 
599 	pr_alert("BUG: Bad page map in process %s  %s:%08llx", current->comm,
600 		 pgtable_level_to_str(level), entry);
601 	__print_bad_page_map_pgtable(vma->vm_mm, addr);
602 	if (page)
603 		dump_page(page, "bad page map");
604 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
605 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
606 	pr_alert("file:%pD fault:%ps mmap:%ps mmap_prepare: %ps read_folio:%ps\n",
607 		 vma->vm_file,
608 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
609 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
610 		 vma->vm_file ? vma->vm_file->f_op->mmap_prepare : NULL,
611 		 mapping ? mapping->a_ops->read_folio : NULL);
612 	dump_stack();
613 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
614 }
615 #define print_bad_pte(vma, addr, pte, page) \
616 	print_bad_page_map(vma, addr, pte_val(pte), page, PGTABLE_LEVEL_PTE)
617 
618 /**
619  * __vm_normal_page() - Get the "struct page" associated with a page table entry.
620  * @vma: The VMA mapping the page table entry.
621  * @addr: The address where the page table entry is mapped.
622  * @pfn: The PFN stored in the page table entry.
623  * @special: Whether the page table entry is marked "special".
624  * @level: The page table level for error reporting purposes only.
625  * @entry: The page table entry value for error reporting purposes only.
626  *
627  * "Special" mappings do not wish to be associated with a "struct page" (either
628  * it doesn't exist, or it exists but they don't want to touch it). In this
629  * case, NULL is returned here. "Normal" mappings do have a struct page and
630  * are ordinarily refcounted.
631  *
632  * Page mappings of the shared zero folios are always considered "special", as
633  * they are not ordinarily refcounted: neither the refcount nor the mapcount
634  * of these folios is adjusted when mapping them into user page tables.
635  * Selected page table walkers (such as GUP) can still identify mappings of the
636  * shared zero folios and work with the underlying "struct page".
637  *
638  * There are 2 broad cases. Firstly, an architecture may define a "special"
639  * page table entry bit, such as pte_special(), in which case this function is
640  * trivial. Secondly, an architecture may not have a spare page table
641  * entry bit, which requires a more complicated scheme, described below.
642  *
643  * With CONFIG_FIND_NORMAL_PAGE, we might have the "special" bit set on
644  * page table entries that actually map "normal" pages: however, that page
645  * cannot be looked up through the PFN stored in the page table entry, but
646  * instead will be looked up through vm_ops->find_normal_page(). So far, this
647  * only applies to PTEs.
648  *
649  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
650  * special mapping (even if there are underlying and valid "struct pages").
651  * COWed pages of a VM_PFNMAP are always normal.
652  *
653  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
654  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
655  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
656  * mapping will always honor the rule
657  *
658  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
659  *
660  * And for normal mappings this is false.
661  *
662  * This restricts such mappings to be a linear translation from virtual address
663  * to pfn. To get around this restriction, we allow arbitrary mappings so long
664  * as the vma is not a COW mapping; in that case, we know that all ptes are
665  * special (because none can have been COWed).
666  *
667  *
668  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
669  *
670  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
671  * page" backing, however the difference is that _all_ pages with a struct
672  * page (that is, those where pfn_valid is true, except the shared zero
673  * folios) are refcounted and considered normal pages by the VM.
674  *
675  * The disadvantage is that pages are refcounted (which can be slower and
676  * simply not an option for some PFNMAP users). The advantage is that we
677  * don't have to follow the strict linearity rule of PFNMAP mappings in
678  * order to support COWable mappings.
679  *
680  * Return: Returns the "struct page" if this is a "normal" mapping. Returns
681  *	   NULL if this is a "special" mapping.
682  */
683 static inline struct page *__vm_normal_page(struct vm_area_struct *vma,
684 		unsigned long addr, unsigned long pfn, bool special,
685 		unsigned long long entry, enum pgtable_level level)
686 {
687 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
688 		if (unlikely(special)) {
689 #ifdef CONFIG_FIND_NORMAL_PAGE
690 			if (vma->vm_ops && vma->vm_ops->find_normal_page)
691 				return vma->vm_ops->find_normal_page(vma, addr);
692 #endif /* CONFIG_FIND_NORMAL_PAGE */
693 			if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
694 				return NULL;
695 			if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
696 				return NULL;
697 
698 			print_bad_page_map(vma, addr, entry, NULL, level);
699 			return NULL;
700 		}
701 		/*
702 		 * With CONFIG_ARCH_HAS_PTE_SPECIAL, any special page table
703 		 * mappings (incl. shared zero folios) are marked accordingly.
704 		 */
705 	} else {
706 		if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) {
707 			if (vma->vm_flags & VM_MIXEDMAP) {
708 				/* If it has a "struct page", it's "normal". */
709 				if (!pfn_valid(pfn))
710 					return NULL;
711 			} else {
712 				unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
713 
714 				/* Only CoW'ed anon folios are "normal". */
715 				if (pfn == vma->vm_pgoff + off)
716 					return NULL;
717 				if (!is_cow_mapping(vma->vm_flags))
718 					return NULL;
719 			}
720 		}
721 
722 		if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
723 			return NULL;
724 	}
725 
726 	if (unlikely(pfn > highest_memmap_pfn)) {
727 		/* Corrupted page table entry. */
728 		print_bad_page_map(vma, addr, entry, NULL, level);
729 		return NULL;
730 	}
731 	/*
732 	 * NOTE! We still have PageReserved() pages in the page tables.
733 	 * For example, VDSO mappings can cause them to exist.
734 	 */
735 	VM_WARN_ON_ONCE(is_zero_pfn(pfn) || is_huge_zero_pfn(pfn));
736 	return pfn_to_page(pfn);
737 }
738 
739 /**
740  * vm_normal_page() - Get the "struct page" associated with a PTE
741  * @vma: The VMA mapping the @pte.
742  * @addr: The address where the @pte is mapped.
743  * @pte: The PTE.
744  *
745  * Get the "struct page" associated with a PTE. See __vm_normal_page()
746  * for details on "normal" and "special" mappings.
747  *
748  * Return: Returns the "struct page" if this is a "normal" mapping. Returns
749  *	   NULL if this is a "special" mapping.
750  */
751 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
752 			    pte_t pte)
753 {
754 	return __vm_normal_page(vma, addr, pte_pfn(pte), pte_special(pte),
755 				pte_val(pte), PGTABLE_LEVEL_PTE);
756 }
757 
758 /**
759  * vm_normal_folio() - Get the "struct folio" associated with a PTE
760  * @vma: The VMA mapping the @pte.
761  * @addr: The address where the @pte is mapped.
762  * @pte: The PTE.
763  *
764  * Get the "struct folio" associated with a PTE. See __vm_normal_page()
765  * for details on "normal" and "special" mappings.
766  *
767  * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
768  *	   NULL if this is a "special" mapping.
769  */
770 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
771 			    pte_t pte)
772 {
773 	struct page *page = vm_normal_page(vma, addr, pte);
774 
775 	if (page)
776 		return page_folio(page);
777 	return NULL;
778 }
779 
780 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
781 /**
782  * vm_normal_page_pmd() - Get the "struct page" associated with a PMD
783  * @vma: The VMA mapping the @pmd.
784  * @addr: The address where the @pmd is mapped.
785  * @pmd: The PMD.
786  *
787  * Get the "struct page" associated with a PTE. See __vm_normal_page()
788  * for details on "normal" and "special" mappings.
789  *
790  * Return: Returns the "struct page" if this is a "normal" mapping. Returns
791  *	   NULL if this is a "special" mapping.
792  */
793 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
794 				pmd_t pmd)
795 {
796 	return __vm_normal_page(vma, addr, pmd_pfn(pmd), pmd_special(pmd),
797 				pmd_val(pmd), PGTABLE_LEVEL_PMD);
798 }
799 
800 /**
801  * vm_normal_folio_pmd() - Get the "struct folio" associated with a PMD
802  * @vma: The VMA mapping the @pmd.
803  * @addr: The address where the @pmd is mapped.
804  * @pmd: The PMD.
805  *
806  * Get the "struct folio" associated with a PTE. See __vm_normal_page()
807  * for details on "normal" and "special" mappings.
808  *
809  * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
810  *	   NULL if this is a "special" mapping.
811  */
812 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
813 				  unsigned long addr, pmd_t pmd)
814 {
815 	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
816 
817 	if (page)
818 		return page_folio(page);
819 	return NULL;
820 }
821 
822 /**
823  * vm_normal_page_pud() - Get the "struct page" associated with a PUD
824  * @vma: The VMA mapping the @pud.
825  * @addr: The address where the @pud is mapped.
826  * @pud: The PUD.
827  *
828  * Get the "struct page" associated with a PUD. See __vm_normal_page()
829  * for details on "normal" and "special" mappings.
830  *
831  * Return: Returns the "struct page" if this is a "normal" mapping. Returns
832  *	   NULL if this is a "special" mapping.
833  */
834 struct page *vm_normal_page_pud(struct vm_area_struct *vma,
835 		unsigned long addr, pud_t pud)
836 {
837 	return __vm_normal_page(vma, addr, pud_pfn(pud), pud_special(pud),
838 				pud_val(pud), PGTABLE_LEVEL_PUD);
839 }
840 #endif
841 
842 /**
843  * restore_exclusive_pte - Restore a device-exclusive entry
844  * @vma: VMA covering @address
845  * @folio: the mapped folio
846  * @page: the mapped folio page
847  * @address: the virtual address
848  * @ptep: pte pointer into the locked page table mapping the folio page
849  * @orig_pte: pte value at @ptep
850  *
851  * Restore a device-exclusive non-swap entry to an ordinary present pte.
852  *
853  * The folio and the page table must be locked, and MMU notifiers must have
854  * been called to invalidate any (exclusive) device mappings.
855  *
856  * Locking the folio makes sure that anybody who just converted the pte to
857  * a device-exclusive entry can map it into the device to make forward
858  * progress without others converting it back until the folio was unlocked.
859  *
860  * If the folio lock ever becomes an issue, we can stop relying on the folio
861  * lock; it might make some scenarios with heavy thrashing less likely to
862  * make forward progress, but these scenarios might not be valid use cases.
863  *
864  * Note that the folio lock does not protect against all cases of concurrent
865  * page table modifications (e.g., MADV_DONTNEED, mprotect), so device drivers
866  * must use MMU notifiers to sync against any concurrent changes.
867  */
868 static void restore_exclusive_pte(struct vm_area_struct *vma,
869 		struct folio *folio, struct page *page, unsigned long address,
870 		pte_t *ptep, pte_t orig_pte)
871 {
872 	pte_t pte;
873 
874 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
875 
876 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
877 	if (pte_swp_soft_dirty(orig_pte))
878 		pte = pte_mksoft_dirty(pte);
879 
880 	if (pte_swp_uffd_wp(orig_pte))
881 		pte = pte_mkuffd_wp(pte);
882 
883 	if ((vma->vm_flags & VM_WRITE) &&
884 	    can_change_pte_writable(vma, address, pte)) {
885 		if (folio_test_dirty(folio))
886 			pte = pte_mkdirty(pte);
887 		pte = pte_mkwrite(pte, vma);
888 	}
889 	set_pte_at(vma->vm_mm, address, ptep, pte);
890 
891 	/*
892 	 * No need to invalidate - it was non-present before. However
893 	 * secondary CPUs may have mappings that need invalidating.
894 	 */
895 	update_mmu_cache(vma, address, ptep);
896 }
897 
898 /*
899  * Tries to restore an exclusive pte if the page lock can be acquired without
900  * sleeping.
901  */
902 static int try_restore_exclusive_pte(struct vm_area_struct *vma,
903 		unsigned long addr, pte_t *ptep, pte_t orig_pte)
904 {
905 	const softleaf_t entry = softleaf_from_pte(orig_pte);
906 	struct page *page = softleaf_to_page(entry);
907 	struct folio *folio = page_folio(page);
908 
909 	if (folio_trylock(folio)) {
910 		restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte);
911 		folio_unlock(folio);
912 		return 0;
913 	}
914 
915 	return -EBUSY;
916 }
917 
918 /*
919  * copy one vm_area from one task to the other. Assumes the page tables
920  * already present in the new task to be cleared in the whole range
921  * covered by this vma.
922  */
923 
924 static unsigned long
925 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
926 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
927 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
928 {
929 	vm_flags_t vm_flags = dst_vma->vm_flags;
930 	pte_t orig_pte = ptep_get(src_pte);
931 	softleaf_t entry = softleaf_from_pte(orig_pte);
932 	pte_t pte = orig_pte;
933 	struct folio *folio;
934 	struct page *page;
935 
936 	if (likely(softleaf_is_swap(entry))) {
937 		if (swap_dup_entry_direct(entry) < 0)
938 			return -EIO;
939 
940 		/* make sure dst_mm is on swapoff's mmlist. */
941 		if (unlikely(list_empty(&dst_mm->mmlist))) {
942 			spin_lock(&mmlist_lock);
943 			if (list_empty(&dst_mm->mmlist))
944 				list_add(&dst_mm->mmlist,
945 						&src_mm->mmlist);
946 			spin_unlock(&mmlist_lock);
947 		}
948 		/* Mark the swap entry as shared. */
949 		if (pte_swp_exclusive(orig_pte)) {
950 			pte = pte_swp_clear_exclusive(orig_pte);
951 			set_pte_at(src_mm, addr, src_pte, pte);
952 		}
953 		rss[MM_SWAPENTS]++;
954 	} else if (softleaf_is_migration(entry)) {
955 		folio = softleaf_to_folio(entry);
956 
957 		rss[mm_counter(folio)]++;
958 
959 		if (!softleaf_is_migration_read(entry) &&
960 				is_cow_mapping(vm_flags)) {
961 			/*
962 			 * COW mappings require pages in both parent and child
963 			 * to be set to read. A previously exclusive entry is
964 			 * now shared.
965 			 */
966 			entry = make_readable_migration_entry(
967 							swp_offset(entry));
968 			pte = softleaf_to_pte(entry);
969 			if (pte_swp_soft_dirty(orig_pte))
970 				pte = pte_swp_mksoft_dirty(pte);
971 			if (pte_swp_uffd_wp(orig_pte))
972 				pte = pte_swp_mkuffd_wp(pte);
973 			set_pte_at(src_mm, addr, src_pte, pte);
974 		}
975 	} else if (softleaf_is_device_private(entry)) {
976 		page = softleaf_to_page(entry);
977 		folio = page_folio(page);
978 
979 		/*
980 		 * Update rss count even for unaddressable pages, as
981 		 * they should treated just like normal pages in this
982 		 * respect.
983 		 *
984 		 * We will likely want to have some new rss counters
985 		 * for unaddressable pages, at some point. But for now
986 		 * keep things as they are.
987 		 */
988 		folio_get(folio);
989 		rss[mm_counter(folio)]++;
990 		/* Cannot fail as these pages cannot get pinned. */
991 		folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);
992 
993 		/*
994 		 * We do not preserve soft-dirty information, because so
995 		 * far, checkpoint/restore is the only feature that
996 		 * requires that. And checkpoint/restore does not work
997 		 * when a device driver is involved (you cannot easily
998 		 * save and restore device driver state).
999 		 */
1000 		if (softleaf_is_device_private_write(entry) &&
1001 		    is_cow_mapping(vm_flags)) {
1002 			entry = make_readable_device_private_entry(
1003 							swp_offset(entry));
1004 			pte = swp_entry_to_pte(entry);
1005 			if (pte_swp_uffd_wp(orig_pte))
1006 				pte = pte_swp_mkuffd_wp(pte);
1007 			set_pte_at(src_mm, addr, src_pte, pte);
1008 		}
1009 	} else if (softleaf_is_device_exclusive(entry)) {
1010 		/*
1011 		 * Make device exclusive entries present by restoring the
1012 		 * original entry then copying as for a present pte. Device
1013 		 * exclusive entries currently only support private writable
1014 		 * (ie. COW) mappings.
1015 		 */
1016 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
1017 		if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
1018 			return -EBUSY;
1019 		return -ENOENT;
1020 	} else if (softleaf_is_marker(entry)) {
1021 		pte_marker marker = copy_pte_marker(entry, dst_vma);
1022 
1023 		if (marker)
1024 			set_pte_at(dst_mm, addr, dst_pte,
1025 				   make_pte_marker(marker));
1026 		return 0;
1027 	}
1028 	if (!userfaultfd_wp(dst_vma))
1029 		pte = pte_swp_clear_uffd_wp(pte);
1030 	set_pte_at(dst_mm, addr, dst_pte, pte);
1031 	return 0;
1032 }
1033 
1034 /*
1035  * Copy a present and normal page.
1036  *
1037  * NOTE! The usual case is that this isn't required;
1038  * instead, the caller can just increase the page refcount
1039  * and re-use the pte the traditional way.
1040  *
1041  * And if we need a pre-allocated page but don't yet have
1042  * one, return a negative error to let the preallocation
1043  * code know so that it can do so outside the page table
1044  * lock.
1045  */
1046 static inline int
1047 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1048 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
1049 		  struct folio **prealloc, struct page *page)
1050 {
1051 	struct folio *new_folio;
1052 	pte_t pte;
1053 
1054 	new_folio = *prealloc;
1055 	if (!new_folio)
1056 		return -EAGAIN;
1057 
1058 	/*
1059 	 * We have a prealloc page, all good!  Take it
1060 	 * over and copy the page & arm it.
1061 	 */
1062 
1063 	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
1064 		return -EHWPOISON;
1065 
1066 	*prealloc = NULL;
1067 	__folio_mark_uptodate(new_folio);
1068 	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
1069 	folio_add_lru_vma(new_folio, dst_vma);
1070 	rss[MM_ANONPAGES]++;
1071 
1072 	/* All done, just insert the new page copy in the child */
1073 	pte = folio_mk_pte(new_folio, dst_vma->vm_page_prot);
1074 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
1075 	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
1076 		/* Uffd-wp needs to be delivered to dest pte as well */
1077 		pte = pte_mkuffd_wp(pte);
1078 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
1079 	return 0;
1080 }
1081 
1082 static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
1083 		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
1084 		pte_t pte, unsigned long addr, int nr)
1085 {
1086 	struct mm_struct *src_mm = src_vma->vm_mm;
1087 
1088 	/* If it's a COW mapping, write protect it both processes. */
1089 	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
1090 		wrprotect_ptes(src_mm, addr, src_pte, nr);
1091 		pte = pte_wrprotect(pte);
1092 	}
1093 
1094 	/* If it's a shared mapping, mark it clean in the child. */
1095 	if (src_vma->vm_flags & VM_SHARED)
1096 		pte = pte_mkclean(pte);
1097 	pte = pte_mkold(pte);
1098 
1099 	if (!userfaultfd_wp(dst_vma))
1100 		pte = pte_clear_uffd_wp(pte);
1101 
1102 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
1103 }
1104 
1105 /*
1106  * Copy one present PTE, trying to batch-process subsequent PTEs that map
1107  * consecutive pages of the same folio by copying them as well.
1108  *
1109  * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
1110  * Otherwise, returns the number of copied PTEs (at least 1).
1111  */
1112 static inline int
1113 copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1114 		 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
1115 		 int max_nr, int *rss, struct folio **prealloc)
1116 {
1117 	fpb_t flags = FPB_MERGE_WRITE;
1118 	struct page *page;
1119 	struct folio *folio;
1120 	int err, nr;
1121 
1122 	page = vm_normal_page(src_vma, addr, pte);
1123 	if (unlikely(!page))
1124 		goto copy_pte;
1125 
1126 	folio = page_folio(page);
1127 
1128 	/*
1129 	 * If we likely have to copy, just don't bother with batching. Make
1130 	 * sure that the common "small folio" case is as fast as possible
1131 	 * by keeping the batching logic separate.
1132 	 */
1133 	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
1134 		if (!(src_vma->vm_flags & VM_SHARED))
1135 			flags |= FPB_RESPECT_DIRTY;
1136 		if (vma_soft_dirty_enabled(src_vma))
1137 			flags |= FPB_RESPECT_SOFT_DIRTY;
1138 
1139 		nr = folio_pte_batch_flags(folio, src_vma, src_pte, &pte, max_nr, flags);
1140 		folio_ref_add(folio, nr);
1141 		if (folio_test_anon(folio)) {
1142 			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
1143 								  nr, dst_vma, src_vma))) {
1144 				folio_ref_sub(folio, nr);
1145 				return -EAGAIN;
1146 			}
1147 			rss[MM_ANONPAGES] += nr;
1148 			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1149 		} else {
1150 			folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
1151 			rss[mm_counter_file(folio)] += nr;
1152 		}
1153 		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
1154 				    addr, nr);
1155 		return nr;
1156 	}
1157 
1158 	folio_get(folio);
1159 	if (folio_test_anon(folio)) {
1160 		/*
1161 		 * If this page may have been pinned by the parent process,
1162 		 * copy the page immediately for the child so that we'll always
1163 		 * guarantee the pinned page won't be randomly replaced in the
1164 		 * future.
1165 		 */
1166 		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) {
1167 			/* Page may be pinned, we have to copy. */
1168 			folio_put(folio);
1169 			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
1170 						addr, rss, prealloc, page);
1171 			return err ? err : 1;
1172 		}
1173 		rss[MM_ANONPAGES]++;
1174 		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1175 	} else {
1176 		folio_dup_file_rmap_pte(folio, page, dst_vma);
1177 		rss[mm_counter_file(folio)]++;
1178 	}
1179 
1180 copy_pte:
1181 	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
1182 	return 1;
1183 }
1184 
1185 static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
1186 		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
1187 {
1188 	struct folio *new_folio;
1189 
1190 	if (need_zero)
1191 		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
1192 	else
1193 		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
1194 
1195 	if (!new_folio)
1196 		return NULL;
1197 
1198 	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
1199 		folio_put(new_folio);
1200 		return NULL;
1201 	}
1202 	folio_throttle_swaprate(new_folio, GFP_KERNEL);
1203 
1204 	return new_folio;
1205 }
1206 
1207 static int
1208 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1209 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1210 	       unsigned long end)
1211 {
1212 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1213 	struct mm_struct *src_mm = src_vma->vm_mm;
1214 	pte_t *orig_src_pte, *orig_dst_pte;
1215 	pte_t *src_pte, *dst_pte;
1216 	pmd_t dummy_pmdval;
1217 	pte_t ptent;
1218 	spinlock_t *src_ptl, *dst_ptl;
1219 	int progress, max_nr, ret = 0;
1220 	int rss[NR_MM_COUNTERS];
1221 	softleaf_t entry = softleaf_mk_none();
1222 	struct folio *prealloc = NULL;
1223 	int nr;
1224 
1225 again:
1226 	progress = 0;
1227 	init_rss_vec(rss);
1228 
1229 	/*
1230 	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
1231 	 * error handling here, assume that exclusive mmap_lock on dst and src
1232 	 * protects anon from unexpected THP transitions; with shmem and file
1233 	 * protected by mmap_lock-less collapse skipping areas with anon_vma
1234 	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
1235 	 * can remove such assumptions later, but this is good enough for now.
1236 	 */
1237 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1238 	if (!dst_pte) {
1239 		ret = -ENOMEM;
1240 		goto out;
1241 	}
1242 
1243 	/*
1244 	 * We already hold the exclusive mmap_lock, the copy_pte_range() and
1245 	 * retract_page_tables() are using vma->anon_vma to be exclusive, so
1246 	 * the PTE page is stable, and there is no need to get pmdval and do
1247 	 * pmd_same() check.
1248 	 */
1249 	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
1250 					   &src_ptl);
1251 	if (!src_pte) {
1252 		pte_unmap_unlock(dst_pte, dst_ptl);
1253 		/* ret == 0 */
1254 		goto out;
1255 	}
1256 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1257 	orig_src_pte = src_pte;
1258 	orig_dst_pte = dst_pte;
1259 	lazy_mmu_mode_enable();
1260 
1261 	do {
1262 		nr = 1;
1263 
1264 		/*
1265 		 * We are holding two locks at this point - either of them
1266 		 * could generate latencies in another task on another CPU.
1267 		 */
1268 		if (progress >= 32) {
1269 			progress = 0;
1270 			if (need_resched() ||
1271 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1272 				break;
1273 		}
1274 		ptent = ptep_get(src_pte);
1275 		if (pte_none(ptent)) {
1276 			progress++;
1277 			continue;
1278 		}
1279 		if (unlikely(!pte_present(ptent))) {
1280 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1281 						  dst_pte, src_pte,
1282 						  dst_vma, src_vma,
1283 						  addr, rss);
1284 			if (ret == -EIO) {
1285 				entry = softleaf_from_pte(ptep_get(src_pte));
1286 				break;
1287 			} else if (ret == -EBUSY) {
1288 				break;
1289 			} else if (!ret) {
1290 				progress += 8;
1291 				continue;
1292 			}
1293 			ptent = ptep_get(src_pte);
1294 			VM_WARN_ON_ONCE(!pte_present(ptent));
1295 
1296 			/*
1297 			 * Device exclusive entry restored, continue by copying
1298 			 * the now present pte.
1299 			 */
1300 			WARN_ON_ONCE(ret != -ENOENT);
1301 		}
1302 		/* copy_present_ptes() will clear `*prealloc' if consumed */
1303 		max_nr = (end - addr) / PAGE_SIZE;
1304 		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
1305 					ptent, addr, max_nr, rss, &prealloc);
1306 		/*
1307 		 * If we need a pre-allocated page for this pte, drop the
1308 		 * locks, allocate, and try again.
1309 		 * If copy failed due to hwpoison in source page, break out.
1310 		 */
1311 		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
1312 			break;
1313 		if (unlikely(prealloc)) {
1314 			/*
1315 			 * pre-alloc page cannot be reused by next time so as
1316 			 * to strictly follow mempolicy (e.g., alloc_page_vma()
1317 			 * will allocate page according to address).  This
1318 			 * could only happen if one pinned pte changed.
1319 			 */
1320 			folio_put(prealloc);
1321 			prealloc = NULL;
1322 		}
1323 		nr = ret;
1324 		progress += 8 * nr;
1325 	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
1326 		 addr != end);
1327 
1328 	lazy_mmu_mode_disable();
1329 	pte_unmap_unlock(orig_src_pte, src_ptl);
1330 	add_mm_rss_vec(dst_mm, rss);
1331 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1332 	cond_resched();
1333 
1334 	if (ret == -EIO) {
1335 		VM_WARN_ON_ONCE(!entry.val);
1336 		if (swap_retry_table_alloc(entry, GFP_KERNEL) < 0) {
1337 			ret = -ENOMEM;
1338 			goto out;
1339 		}
1340 		entry.val = 0;
1341 	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
1342 		goto out;
1343 	} else if (ret ==  -EAGAIN) {
1344 		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
1345 		if (!prealloc)
1346 			return -ENOMEM;
1347 	} else if (ret < 0) {
1348 		VM_WARN_ON_ONCE(1);
1349 	}
1350 
1351 	/* We've captured and resolved the error. Reset, try again. */
1352 	ret = 0;
1353 
1354 	if (addr != end)
1355 		goto again;
1356 out:
1357 	if (unlikely(prealloc))
1358 		folio_put(prealloc);
1359 	return ret;
1360 }
1361 
1362 static inline int
1363 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1364 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1365 	       unsigned long end)
1366 {
1367 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1368 	struct mm_struct *src_mm = src_vma->vm_mm;
1369 	pmd_t *src_pmd, *dst_pmd;
1370 	unsigned long next;
1371 
1372 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1373 	if (!dst_pmd)
1374 		return -ENOMEM;
1375 	src_pmd = pmd_offset(src_pud, addr);
1376 	do {
1377 		next = pmd_addr_end(addr, end);
1378 		if (pmd_is_huge(*src_pmd)) {
1379 			int err;
1380 
1381 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1382 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1383 					    addr, dst_vma, src_vma);
1384 			if (err == -ENOMEM)
1385 				return -ENOMEM;
1386 			if (!err)
1387 				continue;
1388 			/* fall through */
1389 		}
1390 		if (pmd_none_or_clear_bad(src_pmd))
1391 			continue;
1392 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1393 				   addr, next))
1394 			return -ENOMEM;
1395 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1396 	return 0;
1397 }
1398 
1399 static inline int
1400 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1401 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1402 	       unsigned long end)
1403 {
1404 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1405 	struct mm_struct *src_mm = src_vma->vm_mm;
1406 	pud_t *src_pud, *dst_pud;
1407 	unsigned long next;
1408 
1409 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1410 	if (!dst_pud)
1411 		return -ENOMEM;
1412 	src_pud = pud_offset(src_p4d, addr);
1413 	do {
1414 		next = pud_addr_end(addr, end);
1415 		if (pud_trans_huge(*src_pud)) {
1416 			int err;
1417 
1418 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1419 			err = copy_huge_pud(dst_mm, src_mm,
1420 					    dst_pud, src_pud, addr, src_vma);
1421 			if (err == -ENOMEM)
1422 				return -ENOMEM;
1423 			if (!err)
1424 				continue;
1425 			/* fall through */
1426 		}
1427 		if (pud_none_or_clear_bad(src_pud))
1428 			continue;
1429 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1430 				   addr, next))
1431 			return -ENOMEM;
1432 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1433 	return 0;
1434 }
1435 
1436 static inline int
1437 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1438 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1439 	       unsigned long end)
1440 {
1441 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1442 	p4d_t *src_p4d, *dst_p4d;
1443 	unsigned long next;
1444 
1445 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1446 	if (!dst_p4d)
1447 		return -ENOMEM;
1448 	src_p4d = p4d_offset(src_pgd, addr);
1449 	do {
1450 		next = p4d_addr_end(addr, end);
1451 		if (p4d_none_or_clear_bad(src_p4d))
1452 			continue;
1453 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1454 				   addr, next))
1455 			return -ENOMEM;
1456 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1457 	return 0;
1458 }
1459 
1460 /*
1461  * Return true if the vma needs to copy the pgtable during this fork().  Return
1462  * false when we can speed up fork() by allowing lazy page faults later until
1463  * when the child accesses the memory range.
1464  */
1465 static bool
1466 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1467 {
1468 	/*
1469 	 * We check against dst_vma as while sane VMA flags will have been
1470 	 * copied, VM_UFFD_WP may be set only on dst_vma.
1471 	 */
1472 	if (dst_vma->vm_flags & VM_COPY_ON_FORK)
1473 		return true;
1474 	/*
1475 	 * The presence of an anon_vma indicates an anonymous VMA has page
1476 	 * tables which naturally cannot be reconstituted on page fault.
1477 	 */
1478 	if (src_vma->anon_vma)
1479 		return true;
1480 
1481 	/*
1482 	 * Don't copy ptes where a page fault will fill them correctly.  Fork
1483 	 * becomes much lighter when there are big shared or private readonly
1484 	 * mappings. The tradeoff is that copy_page_range is more efficient
1485 	 * than faulting.
1486 	 */
1487 	return false;
1488 }
1489 
1490 int
1491 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1492 {
1493 	pgd_t *src_pgd, *dst_pgd;
1494 	unsigned long addr = src_vma->vm_start;
1495 	unsigned long end = src_vma->vm_end;
1496 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1497 	struct mm_struct *src_mm = src_vma->vm_mm;
1498 	struct mmu_notifier_range range;
1499 	unsigned long next;
1500 	bool is_cow;
1501 	int ret;
1502 
1503 	if (!vma_needs_copy(dst_vma, src_vma))
1504 		return 0;
1505 
1506 	if (is_vm_hugetlb_page(src_vma))
1507 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1508 
1509 	/*
1510 	 * We need to invalidate the secondary MMU mappings only when
1511 	 * there could be a permission downgrade on the ptes of the
1512 	 * parent mm. And a permission downgrade will only happen if
1513 	 * is_cow_mapping() returns true.
1514 	 */
1515 	is_cow = is_cow_mapping(src_vma->vm_flags);
1516 
1517 	if (is_cow) {
1518 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1519 					0, src_mm, addr, end);
1520 		mmu_notifier_invalidate_range_start(&range);
1521 		/*
1522 		 * Disabling preemption is not needed for the write side, as
1523 		 * the read side doesn't spin, but goes to the mmap_lock.
1524 		 *
1525 		 * Use the raw variant of the seqcount_t write API to avoid
1526 		 * lockdep complaining about preemptibility.
1527 		 */
1528 		vma_assert_write_locked(src_vma);
1529 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1530 	}
1531 
1532 	ret = 0;
1533 	dst_pgd = pgd_offset(dst_mm, addr);
1534 	src_pgd = pgd_offset(src_mm, addr);
1535 	do {
1536 		next = pgd_addr_end(addr, end);
1537 		if (pgd_none_or_clear_bad(src_pgd))
1538 			continue;
1539 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1540 					    addr, next))) {
1541 			ret = -ENOMEM;
1542 			break;
1543 		}
1544 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1545 
1546 	if (is_cow) {
1547 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1548 		mmu_notifier_invalidate_range_end(&range);
1549 	}
1550 	return ret;
1551 }
1552 
1553 /* Whether we should zap all COWed (private) pages too */
1554 static inline bool should_zap_cows(struct zap_details *details)
1555 {
1556 	/* By default, zap all pages */
1557 	if (!details)
1558 		return true;
1559 
1560 	VM_WARN_ON_ONCE(details->skip_cows && details->reclaim_pt);
1561 
1562 	/* Or, we zap COWed pages only if the caller wants to */
1563 	return !details->skip_cows;
1564 }
1565 
1566 /* Decides whether we should zap this folio with the folio pointer specified */
1567 static inline bool should_zap_folio(struct zap_details *details,
1568 				    struct folio *folio)
1569 {
1570 	/* If we can make a decision without *folio.. */
1571 	if (should_zap_cows(details))
1572 		return true;
1573 
1574 	/* Otherwise we should only zap non-anon folios */
1575 	return !folio_test_anon(folio);
1576 }
1577 
1578 static inline bool zap_drop_markers(struct zap_details *details)
1579 {
1580 	if (!details)
1581 		return false;
1582 
1583 	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1584 }
1585 
1586 /*
1587  * This function makes sure that we'll replace the none pte with an uffd-wp
1588  * swap special pte marker when necessary. Must be with the pgtable lock held.
1589  *
1590  * Returns true if uffd-wp ptes was installed, false otherwise.
1591  */
1592 static inline bool
1593 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1594 			      unsigned long addr, pte_t *pte, int nr,
1595 			      struct zap_details *details, pte_t pteval)
1596 {
1597 	bool was_installed = false;
1598 
1599 	if (!uffd_supports_wp_marker())
1600 		return false;
1601 
1602 	/* Zap on anonymous always means dropping everything */
1603 	if (vma_is_anonymous(vma))
1604 		return false;
1605 
1606 	if (zap_drop_markers(details))
1607 		return false;
1608 
1609 	for (;;) {
1610 		/* the PFN in the PTE is irrelevant. */
1611 		if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
1612 			was_installed = true;
1613 		if (--nr == 0)
1614 			break;
1615 		pte++;
1616 		addr += PAGE_SIZE;
1617 	}
1618 
1619 	return was_installed;
1620 }
1621 
1622 static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
1623 		struct vm_area_struct *vma, struct folio *folio,
1624 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
1625 		unsigned long addr, struct zap_details *details, int *rss,
1626 		bool *force_flush, bool *force_break, bool *any_skipped)
1627 {
1628 	struct mm_struct *mm = tlb->mm;
1629 	bool delay_rmap = false;
1630 
1631 	if (!folio_test_anon(folio)) {
1632 		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1633 		if (pte_dirty(ptent)) {
1634 			folio_mark_dirty(folio);
1635 			if (tlb_delay_rmap(tlb)) {
1636 				delay_rmap = true;
1637 				*force_flush = true;
1638 			}
1639 		}
1640 		if (pte_young(ptent) && likely(vma_has_recency(vma)))
1641 			folio_mark_accessed(folio);
1642 		rss[mm_counter(folio)] -= nr;
1643 	} else {
1644 		/* We don't need up-to-date accessed/dirty bits. */
1645 		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1646 		rss[MM_ANONPAGES] -= nr;
1647 	}
1648 	/* Checking a single PTE in a batch is sufficient. */
1649 	arch_check_zapped_pte(vma, ptent);
1650 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
1651 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
1652 		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
1653 							     nr, details, ptent);
1654 
1655 	if (!delay_rmap) {
1656 		folio_remove_rmap_ptes(folio, page, nr, vma);
1657 
1658 		if (unlikely(folio_mapcount(folio) < 0))
1659 			print_bad_pte(vma, addr, ptent, page);
1660 	}
1661 	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
1662 		*force_flush = true;
1663 		*force_break = true;
1664 	}
1665 }
1666 
1667 /*
1668  * Zap or skip at least one present PTE, trying to batch-process subsequent
1669  * PTEs that map consecutive pages of the same folio.
1670  *
1671  * Returns the number of processed (skipped or zapped) PTEs (at least 1).
1672  */
1673 static inline int zap_present_ptes(struct mmu_gather *tlb,
1674 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1675 		unsigned int max_nr, unsigned long addr,
1676 		struct zap_details *details, int *rss, bool *force_flush,
1677 		bool *force_break, bool *any_skipped)
1678 {
1679 	struct mm_struct *mm = tlb->mm;
1680 	struct folio *folio;
1681 	struct page *page;
1682 	int nr;
1683 
1684 	page = vm_normal_page(vma, addr, ptent);
1685 	if (!page) {
1686 		/* We don't need up-to-date accessed/dirty bits. */
1687 		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
1688 		arch_check_zapped_pte(vma, ptent);
1689 		tlb_remove_tlb_entry(tlb, pte, addr);
1690 		if (userfaultfd_pte_wp(vma, ptent))
1691 			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
1692 						pte, 1, details, ptent);
1693 		ksm_might_unmap_zero_page(mm, ptent);
1694 		return 1;
1695 	}
1696 
1697 	folio = page_folio(page);
1698 	if (unlikely(!should_zap_folio(details, folio))) {
1699 		*any_skipped = true;
1700 		return 1;
1701 	}
1702 
1703 	/*
1704 	 * Make sure that the common "small folio" case is as fast as possible
1705 	 * by keeping the batching logic separate.
1706 	 */
1707 	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
1708 		nr = folio_pte_batch(folio, pte, ptent, max_nr);
1709 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
1710 				       addr, details, rss, force_flush,
1711 				       force_break, any_skipped);
1712 		return nr;
1713 	}
1714 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
1715 			       details, rss, force_flush, force_break, any_skipped);
1716 	return 1;
1717 }
1718 
1719 static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
1720 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1721 		unsigned int max_nr, unsigned long addr,
1722 		struct zap_details *details, int *rss, bool *any_skipped)
1723 {
1724 	softleaf_t entry;
1725 	int nr = 1;
1726 
1727 	*any_skipped = true;
1728 	entry = softleaf_from_pte(ptent);
1729 	if (softleaf_is_device_private(entry) ||
1730 	    softleaf_is_device_exclusive(entry)) {
1731 		struct page *page = softleaf_to_page(entry);
1732 		struct folio *folio = page_folio(page);
1733 
1734 		if (unlikely(!should_zap_folio(details, folio)))
1735 			return 1;
1736 		/*
1737 		 * Both device private/exclusive mappings should only
1738 		 * work with anonymous page so far, so we don't need to
1739 		 * consider uffd-wp bit when zap. For more information,
1740 		 * see zap_install_uffd_wp_if_needed().
1741 		 */
1742 		WARN_ON_ONCE(!vma_is_anonymous(vma));
1743 		rss[mm_counter(folio)]--;
1744 		folio_remove_rmap_pte(folio, page, vma);
1745 		folio_put(folio);
1746 	} else if (softleaf_is_swap(entry)) {
1747 		/* Genuine swap entries, hence a private anon pages */
1748 		if (!should_zap_cows(details))
1749 			return 1;
1750 
1751 		nr = swap_pte_batch(pte, max_nr, ptent);
1752 		rss[MM_SWAPENTS] -= nr;
1753 		swap_put_entries_direct(entry, nr);
1754 	} else if (softleaf_is_migration(entry)) {
1755 		struct folio *folio = softleaf_to_folio(entry);
1756 
1757 		if (!should_zap_folio(details, folio))
1758 			return 1;
1759 		rss[mm_counter(folio)]--;
1760 	} else if (softleaf_is_uffd_wp_marker(entry)) {
1761 		/*
1762 		 * For anon: always drop the marker; for file: only
1763 		 * drop the marker if explicitly requested.
1764 		 */
1765 		if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
1766 			return 1;
1767 	} else if (softleaf_is_guard_marker(entry)) {
1768 		/*
1769 		 * Ordinary zapping should not remove guard PTE
1770 		 * markers. Only do so if we should remove PTE markers
1771 		 * in general.
1772 		 */
1773 		if (!zap_drop_markers(details))
1774 			return 1;
1775 	} else if (softleaf_is_hwpoison(entry) ||
1776 		   softleaf_is_poison_marker(entry)) {
1777 		if (!should_zap_cows(details))
1778 			return 1;
1779 	} else {
1780 		/* We should have covered all the swap entry types */
1781 		pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
1782 		WARN_ON_ONCE(1);
1783 	}
1784 	clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
1785 	*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
1786 
1787 	return nr;
1788 }
1789 
1790 static inline int do_zap_pte_range(struct mmu_gather *tlb,
1791 				   struct vm_area_struct *vma, pte_t *pte,
1792 				   unsigned long addr, unsigned long end,
1793 				   struct zap_details *details, int *rss,
1794 				   bool *force_flush, bool *force_break,
1795 				   bool *any_skipped)
1796 {
1797 	pte_t ptent = ptep_get(pte);
1798 	int max_nr = (end - addr) / PAGE_SIZE;
1799 	int nr = 0;
1800 
1801 	/* Skip all consecutive none ptes */
1802 	if (pte_none(ptent)) {
1803 		for (nr = 1; nr < max_nr; nr++) {
1804 			ptent = ptep_get(pte + nr);
1805 			if (!pte_none(ptent))
1806 				break;
1807 		}
1808 		max_nr -= nr;
1809 		if (!max_nr)
1810 			return nr;
1811 		pte += nr;
1812 		addr += nr * PAGE_SIZE;
1813 	}
1814 
1815 	if (pte_present(ptent))
1816 		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
1817 				       details, rss, force_flush, force_break,
1818 				       any_skipped);
1819 	else
1820 		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
1821 					  details, rss, any_skipped);
1822 
1823 	return nr;
1824 }
1825 
1826 static bool pte_table_reclaim_possible(unsigned long start, unsigned long end,
1827 		struct zap_details *details)
1828 {
1829 	if (!IS_ENABLED(CONFIG_PT_RECLAIM))
1830 		return false;
1831 	/* Only zap if we are allowed to and cover the full page table. */
1832 	return details && details->reclaim_pt && (end - start >= PMD_SIZE);
1833 }
1834 
1835 static bool zap_empty_pte_table(struct mm_struct *mm, pmd_t *pmd,
1836 		spinlock_t *ptl, pmd_t *pmdval)
1837 {
1838 	spinlock_t *pml = pmd_lockptr(mm, pmd);
1839 
1840 	if (ptl != pml && !spin_trylock(pml))
1841 		return false;
1842 
1843 	*pmdval = pmdp_get(pmd);
1844 	pmd_clear(pmd);
1845 	if (ptl != pml)
1846 		spin_unlock(pml);
1847 	return true;
1848 }
1849 
1850 static bool zap_pte_table_if_empty(struct mm_struct *mm, pmd_t *pmd,
1851 		unsigned long addr, pmd_t *pmdval)
1852 {
1853 	spinlock_t *pml, *ptl = NULL;
1854 	pte_t *start_pte, *pte;
1855 	int i;
1856 
1857 	pml = pmd_lock(mm, pmd);
1858 	start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, pmdval, &ptl);
1859 	if (!start_pte)
1860 		goto out_ptl;
1861 	if (ptl != pml)
1862 		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1863 
1864 	for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) {
1865 		if (!pte_none(ptep_get(pte)))
1866 			goto out_ptl;
1867 	}
1868 	pte_unmap(start_pte);
1869 
1870 	pmd_clear(pmd);
1871 
1872 	if (ptl != pml)
1873 		spin_unlock(ptl);
1874 	spin_unlock(pml);
1875 	return true;
1876 out_ptl:
1877 	if (start_pte)
1878 		pte_unmap_unlock(start_pte, ptl);
1879 	if (ptl != pml)
1880 		spin_unlock(pml);
1881 	return false;
1882 }
1883 
1884 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1885 				struct vm_area_struct *vma, pmd_t *pmd,
1886 				unsigned long addr, unsigned long end,
1887 				struct zap_details *details)
1888 {
1889 	bool can_reclaim_pt = pte_table_reclaim_possible(addr, end, details);
1890 	bool force_flush = false, force_break = false;
1891 	struct mm_struct *mm = tlb->mm;
1892 	int rss[NR_MM_COUNTERS];
1893 	spinlock_t *ptl;
1894 	pte_t *start_pte;
1895 	pte_t *pte;
1896 	pmd_t pmdval;
1897 	unsigned long start = addr;
1898 	bool direct_reclaim = true;
1899 	int nr;
1900 
1901 retry:
1902 	tlb_change_page_size(tlb, PAGE_SIZE);
1903 	init_rss_vec(rss);
1904 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1905 	if (!pte)
1906 		return addr;
1907 
1908 	flush_tlb_batched_pending(mm);
1909 	lazy_mmu_mode_enable();
1910 	do {
1911 		bool any_skipped = false;
1912 
1913 		if (need_resched()) {
1914 			direct_reclaim = false;
1915 			break;
1916 		}
1917 
1918 		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
1919 				      &force_flush, &force_break, &any_skipped);
1920 		if (any_skipped)
1921 			can_reclaim_pt = false;
1922 		if (unlikely(force_break)) {
1923 			addr += nr * PAGE_SIZE;
1924 			direct_reclaim = false;
1925 			break;
1926 		}
1927 	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
1928 
1929 	/*
1930 	 * Fast path: try to hold the pmd lock and unmap the PTE page.
1931 	 *
1932 	 * If the pte lock was released midway (retry case), or if the attempt
1933 	 * to hold the pmd lock failed, then we need to recheck all pte entries
1934 	 * to ensure they are still none, thereby preventing the pte entries
1935 	 * from being repopulated by another thread.
1936 	 */
1937 	if (can_reclaim_pt && direct_reclaim && addr == end)
1938 		direct_reclaim = zap_empty_pte_table(mm, pmd, ptl, &pmdval);
1939 
1940 	add_mm_rss_vec(mm, rss);
1941 	lazy_mmu_mode_disable();
1942 
1943 	/* Do the actual TLB flush before dropping ptl */
1944 	if (force_flush) {
1945 		tlb_flush_mmu_tlbonly(tlb);
1946 		tlb_flush_rmaps(tlb, vma);
1947 	}
1948 	pte_unmap_unlock(start_pte, ptl);
1949 
1950 	/*
1951 	 * If we forced a TLB flush (either due to running out of
1952 	 * batch buffers or because we needed to flush dirty TLB
1953 	 * entries before releasing the ptl), free the batched
1954 	 * memory too. Come back again if we didn't do everything.
1955 	 */
1956 	if (force_flush)
1957 		tlb_flush_mmu(tlb);
1958 
1959 	if (addr != end) {
1960 		cond_resched();
1961 		force_flush = false;
1962 		force_break = false;
1963 		goto retry;
1964 	}
1965 
1966 	if (can_reclaim_pt) {
1967 		if (direct_reclaim || zap_pte_table_if_empty(mm, pmd, start, &pmdval)) {
1968 			pte_free_tlb(tlb, pmd_pgtable(pmdval), addr);
1969 			mm_dec_nr_ptes(mm);
1970 		}
1971 	}
1972 
1973 	return addr;
1974 }
1975 
1976 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1977 				struct vm_area_struct *vma, pud_t *pud,
1978 				unsigned long addr, unsigned long end,
1979 				struct zap_details *details)
1980 {
1981 	pmd_t *pmd;
1982 	unsigned long next;
1983 
1984 	pmd = pmd_offset(pud, addr);
1985 	do {
1986 		next = pmd_addr_end(addr, end);
1987 		if (pmd_is_huge(*pmd)) {
1988 			if (next - addr != HPAGE_PMD_SIZE)
1989 				__split_huge_pmd(vma, pmd, addr, false);
1990 			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1991 				addr = next;
1992 				continue;
1993 			}
1994 			/* fall through */
1995 		} else if (details && details->single_folio &&
1996 			   folio_test_pmd_mappable(details->single_folio) &&
1997 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1998 			sync_with_folio_pmd_zap(tlb->mm, pmd);
1999 		}
2000 		if (pmd_none(*pmd)) {
2001 			addr = next;
2002 			continue;
2003 		}
2004 		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
2005 		if (addr != next)
2006 			pmd--;
2007 	} while (pmd++, cond_resched(), addr != end);
2008 
2009 	return addr;
2010 }
2011 
2012 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
2013 				struct vm_area_struct *vma, p4d_t *p4d,
2014 				unsigned long addr, unsigned long end,
2015 				struct zap_details *details)
2016 {
2017 	pud_t *pud;
2018 	unsigned long next;
2019 
2020 	pud = pud_offset(p4d, addr);
2021 	do {
2022 		next = pud_addr_end(addr, end);
2023 		if (pud_trans_huge(*pud)) {
2024 			if (next - addr != HPAGE_PUD_SIZE)
2025 				split_huge_pud(vma, pud, addr);
2026 			else if (zap_huge_pud(tlb, vma, pud, addr))
2027 				goto next;
2028 			/* fall through */
2029 		}
2030 		if (pud_none_or_clear_bad(pud))
2031 			continue;
2032 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
2033 next:
2034 		cond_resched();
2035 	} while (pud++, addr = next, addr != end);
2036 
2037 	return addr;
2038 }
2039 
2040 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
2041 				struct vm_area_struct *vma, pgd_t *pgd,
2042 				unsigned long addr, unsigned long end,
2043 				struct zap_details *details)
2044 {
2045 	p4d_t *p4d;
2046 	unsigned long next;
2047 
2048 	p4d = p4d_offset(pgd, addr);
2049 	do {
2050 		next = p4d_addr_end(addr, end);
2051 		if (p4d_none_or_clear_bad(p4d))
2052 			continue;
2053 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
2054 	} while (p4d++, addr = next, addr != end);
2055 
2056 	return addr;
2057 }
2058 
2059 static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2060 		unsigned long start, unsigned long end,
2061 		struct zap_details *details)
2062 {
2063 	const bool reaping = details && details->reaping;
2064 
2065 	VM_WARN_ON_ONCE(start >= end || !range_in_vma(vma, start, end));
2066 
2067 	/* uprobe_munmap() might sleep, so skip it when reaping. */
2068 	if (vma->vm_file && !reaping)
2069 		uprobe_munmap(vma, start, end);
2070 
2071 	if (unlikely(is_vm_hugetlb_page(vma))) {
2072 		zap_flags_t zap_flags = details ? details->zap_flags : 0;
2073 
2074 		VM_WARN_ON_ONCE(reaping);
2075 		/*
2076 		 * vm_file will be NULL when we fail early while instantiating
2077 		 * a new mapping. In this case, no pages were mapped yet and
2078 		 * there is nothing to do.
2079 		 */
2080 		if (!vma->vm_file)
2081 			return;
2082 		__unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags);
2083 	} else {
2084 		unsigned long next, addr = start;
2085 		pgd_t *pgd;
2086 
2087 		tlb_start_vma(tlb, vma);
2088 		pgd = pgd_offset(vma->vm_mm, addr);
2089 		do {
2090 			next = pgd_addr_end(addr, end);
2091 			if (pgd_none_or_clear_bad(pgd))
2092 				continue;
2093 			next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
2094 		} while (pgd++, addr = next, addr != end);
2095 		tlb_end_vma(tlb, vma);
2096 	}
2097 }
2098 
2099 /**
2100  * zap_vma_for_reaping - zap all page table entries in the vma without blocking
2101  * @vma: The vma to zap.
2102  *
2103  * Zap all page table entries in the vma without blocking for use by the oom
2104  * killer. Hugetlb vmas are not supported.
2105  *
2106  * Returns: 0 on success, -EBUSY if we would have to block.
2107  */
2108 int zap_vma_for_reaping(struct vm_area_struct *vma)
2109 {
2110 	struct zap_details details = {
2111 		.reaping = true,
2112 	};
2113 	struct mmu_notifier_range range;
2114 	struct mmu_gather tlb;
2115 
2116 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2117 				vma->vm_start, vma->vm_end);
2118 	tlb_gather_mmu(&tlb, vma->vm_mm);
2119 	if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
2120 		tlb_finish_mmu(&tlb);
2121 		return -EBUSY;
2122 	}
2123 	__zap_vma_range(&tlb, vma, range.start, range.end, &details);
2124 	mmu_notifier_invalidate_range_end(&range);
2125 	tlb_finish_mmu(&tlb);
2126 	return 0;
2127 }
2128 
2129 /**
2130  * unmap_vmas - unmap a range of memory covered by a list of vma's
2131  * @tlb: address of the caller's struct mmu_gather
2132  * @unmap: The unmap_desc
2133  *
2134  * Unmap all pages in the vma list.
2135  *
2136  * Only addresses between `start' and `end' will be unmapped.
2137  *
2138  * The VMA list must be sorted in ascending virtual address order.
2139  *
2140  * unmap_vmas() assumes that the caller will flush the whole unmapped address
2141  * range after unmap_vmas() returns.  So the only responsibility here is to
2142  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
2143  * drops the lock and schedules.
2144  */
2145 void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
2146 {
2147 	struct vm_area_struct *vma;
2148 	struct mmu_notifier_range range;
2149 	struct zap_details details = {
2150 		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
2151 	};
2152 
2153 	vma = unmap->first;
2154 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
2155 				unmap->vma_start, unmap->vma_end);
2156 	mmu_notifier_invalidate_range_start(&range);
2157 	do {
2158 		unsigned long start = max(vma->vm_start, unmap->vma_start);
2159 		unsigned long end = min(vma->vm_end, unmap->vma_end);
2160 
2161 		hugetlb_zap_begin(vma, &start, &end);
2162 		__zap_vma_range(tlb, vma, start, end, &details);
2163 		hugetlb_zap_end(vma, &details);
2164 		vma = mas_find(unmap->mas, unmap->tree_end - 1);
2165 	} while (vma);
2166 	mmu_notifier_invalidate_range_end(&range);
2167 }
2168 
2169 /**
2170  * zap_vma_range_batched - zap page table entries in a vma range
2171  * @tlb: pointer to the caller's struct mmu_gather
2172  * @vma: the vma covering the range to zap
2173  * @address: starting address of the range to zap
2174  * @size: number of bytes to zap
2175  * @details: details specifying zapping behavior
2176  *
2177  * @tlb must not be NULL. The provided address range must be fully
2178  * contained within @vma. If @vma is for hugetlb, @tlb is flushed and
2179  * re-initialized by this function.
2180  *
2181  * If @details is NULL, this function will zap all page table entries.
2182  */
2183 void zap_vma_range_batched(struct mmu_gather *tlb,
2184 		struct vm_area_struct *vma, unsigned long address,
2185 		unsigned long size, struct zap_details *details)
2186 {
2187 	const unsigned long end = address + size;
2188 	struct mmu_notifier_range range;
2189 
2190 	VM_WARN_ON_ONCE(!tlb || tlb->mm != vma->vm_mm);
2191 
2192 	if (unlikely(!size))
2193 		return;
2194 
2195 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2196 				address, end);
2197 	hugetlb_zap_begin(vma, &range.start, &range.end);
2198 	update_hiwater_rss(vma->vm_mm);
2199 	mmu_notifier_invalidate_range_start(&range);
2200 	/*
2201 	 * unmap 'address-end' not 'range.start-range.end' as range
2202 	 * could have been expanded for hugetlb pmd sharing.
2203 	 */
2204 	__zap_vma_range(tlb, vma, address, end, details);
2205 	mmu_notifier_invalidate_range_end(&range);
2206 	if (is_vm_hugetlb_page(vma)) {
2207 		/*
2208 		 * flush tlb and free resources before hugetlb_zap_end(), to
2209 		 * avoid concurrent page faults' allocation failure.
2210 		 */
2211 		tlb_finish_mmu(tlb);
2212 		hugetlb_zap_end(vma, details);
2213 		tlb_gather_mmu(tlb, vma->vm_mm);
2214 	}
2215 }
2216 
2217 /**
2218  * zap_vma_range - zap all page table entries in a vma range
2219  * @vma: the vma covering the range to zap
2220  * @address: starting address of the range to zap
2221  * @size: number of bytes to zap
2222  *
2223  * The provided address range must be fully contained within @vma.
2224  */
2225 void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
2226 		unsigned long size)
2227 {
2228 	struct mmu_gather tlb;
2229 
2230 	tlb_gather_mmu(&tlb, vma->vm_mm);
2231 	zap_vma_range_batched(&tlb, vma, address, size, NULL);
2232 	tlb_finish_mmu(&tlb);
2233 }
2234 
2235 /**
2236  * zap_special_vma_range - zap all page table entries in a special vma range
2237  * @vma: the vma covering the range to zap
2238  * @address: starting address of the range to zap
2239  * @size: number of bytes to zap
2240  *
2241  * This function does nothing when the provided address range is not fully
2242  * contained in @vma, or when the @vma is not VM_PFNMAP or VM_MIXEDMAP.
2243  */
2244 void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
2245 		unsigned long size)
2246 {
2247 	if (!range_in_vma(vma, address, address + size) ||
2248 	   !(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
2249 		return;
2250 
2251 	zap_vma_range(vma, address, size);
2252 }
2253 EXPORT_SYMBOL_GPL(zap_special_vma_range);
2254 
2255 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
2256 {
2257 	pgd_t *pgd;
2258 	p4d_t *p4d;
2259 	pud_t *pud;
2260 	pmd_t *pmd;
2261 
2262 	pgd = pgd_offset(mm, addr);
2263 	p4d = p4d_alloc(mm, pgd, addr);
2264 	if (!p4d)
2265 		return NULL;
2266 	pud = pud_alloc(mm, p4d, addr);
2267 	if (!pud)
2268 		return NULL;
2269 	pmd = pmd_alloc(mm, pud, addr);
2270 	if (!pmd)
2271 		return NULL;
2272 
2273 	VM_BUG_ON(pmd_trans_huge(*pmd));
2274 	return pmd;
2275 }
2276 
2277 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2278 		      spinlock_t **ptl)
2279 {
2280 	pmd_t *pmd = walk_to_pmd(mm, addr);
2281 
2282 	if (!pmd)
2283 		return NULL;
2284 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
2285 }
2286 
2287 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
2288 {
2289 	VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
2290 	/*
2291 	 * Whoever wants to forbid the zeropage after some zeropages
2292 	 * might already have been mapped has to scan the page tables and
2293 	 * bail out on any zeropages. Zeropages in COW mappings can
2294 	 * be unshared using FAULT_FLAG_UNSHARE faults.
2295 	 */
2296 	if (mm_forbids_zeropage(vma->vm_mm))
2297 		return false;
2298 	/* zeropages in COW mappings are common and unproblematic. */
2299 	if (is_cow_mapping(vma->vm_flags))
2300 		return true;
2301 	/* Mappings that do not allow for writable PTEs are unproblematic. */
2302 	if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
2303 		return true;
2304 	/*
2305 	 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
2306 	 * find the shared zeropage and longterm-pin it, which would
2307 	 * be problematic as soon as the zeropage gets replaced by a different
2308 	 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
2309 	 * now differ to what GUP looked up. FSDAX is incompatible to
2310 	 * FOLL_LONGTERM and VM_IO is incompatible to GUP completely (see
2311 	 * check_vma_flags).
2312 	 */
2313 	return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
2314 	       (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
2315 }
2316 
2317 static int validate_page_before_insert(struct vm_area_struct *vma,
2318 				       struct page *page)
2319 {
2320 	struct folio *folio = page_folio(page);
2321 
2322 	if (!folio_ref_count(folio))
2323 		return -EINVAL;
2324 	if (unlikely(is_zero_folio(folio))) {
2325 		if (!vm_mixed_zeropage_allowed(vma))
2326 			return -EINVAL;
2327 		return 0;
2328 	}
2329 	if (folio_test_anon(folio) || page_has_type(page))
2330 		return -EINVAL;
2331 	flush_dcache_folio(folio);
2332 	return 0;
2333 }
2334 
2335 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
2336 				unsigned long addr, struct page *page,
2337 				pgprot_t prot, bool mkwrite)
2338 {
2339 	struct folio *folio = page_folio(page);
2340 	pte_t pteval = ptep_get(pte);
2341 
2342 	if (!pte_none(pteval)) {
2343 		if (!mkwrite)
2344 			return -EBUSY;
2345 
2346 		/* see insert_pfn(). */
2347 		if (pte_pfn(pteval) != page_to_pfn(page)) {
2348 			WARN_ON_ONCE(!is_zero_pfn(pte_pfn(pteval)));
2349 			return -EFAULT;
2350 		}
2351 		pteval = maybe_mkwrite(pteval, vma);
2352 		pteval = pte_mkyoung(pteval);
2353 		if (ptep_set_access_flags(vma, addr, pte, pteval, 1))
2354 			update_mmu_cache(vma, addr, pte);
2355 		return 0;
2356 	}
2357 
2358 	/* Ok, finally just insert the thing.. */
2359 	pteval = mk_pte(page, prot);
2360 	if (unlikely(is_zero_folio(folio))) {
2361 		pteval = pte_mkspecial(pteval);
2362 	} else {
2363 		folio_get(folio);
2364 		pteval = mk_pte(page, prot);
2365 		if (mkwrite) {
2366 			pteval = pte_mkyoung(pteval);
2367 			pteval = maybe_mkwrite(pte_mkdirty(pteval), vma);
2368 		}
2369 		inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
2370 		folio_add_file_rmap_pte(folio, page, vma);
2371 	}
2372 	set_pte_at(vma->vm_mm, addr, pte, pteval);
2373 	return 0;
2374 }
2375 
2376 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2377 			struct page *page, pgprot_t prot, bool mkwrite)
2378 {
2379 	int retval;
2380 	pte_t *pte;
2381 	spinlock_t *ptl;
2382 
2383 	retval = validate_page_before_insert(vma, page);
2384 	if (retval)
2385 		goto out;
2386 	retval = -ENOMEM;
2387 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
2388 	if (!pte)
2389 		goto out;
2390 	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot,
2391 					mkwrite);
2392 	pte_unmap_unlock(pte, ptl);
2393 out:
2394 	return retval;
2395 }
2396 
2397 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
2398 			unsigned long addr, struct page *page, pgprot_t prot)
2399 {
2400 	int err;
2401 
2402 	err = validate_page_before_insert(vma, page);
2403 	if (err)
2404 		return err;
2405 	return insert_page_into_pte_locked(vma, pte, addr, page, prot, false);
2406 }
2407 
2408 /* insert_pages() amortizes the cost of spinlock operations
2409  * when inserting pages in a loop.
2410  */
2411 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
2412 			struct page **pages, unsigned long *num, pgprot_t prot)
2413 {
2414 	pmd_t *pmd = NULL;
2415 	pte_t *start_pte, *pte;
2416 	spinlock_t *pte_lock;
2417 	struct mm_struct *const mm = vma->vm_mm;
2418 	unsigned long curr_page_idx = 0;
2419 	unsigned long remaining_pages_total = *num;
2420 	unsigned long pages_to_write_in_pmd;
2421 	int ret;
2422 more:
2423 	ret = -EFAULT;
2424 	pmd = walk_to_pmd(mm, addr);
2425 	if (!pmd)
2426 		goto out;
2427 
2428 	pages_to_write_in_pmd = min_t(unsigned long,
2429 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
2430 
2431 	/* Allocate the PTE if necessary; takes PMD lock once only. */
2432 	ret = -ENOMEM;
2433 	if (pte_alloc(mm, pmd))
2434 		goto out;
2435 
2436 	while (pages_to_write_in_pmd) {
2437 		int pte_idx = 0;
2438 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
2439 
2440 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2441 		if (!start_pte) {
2442 			ret = -EFAULT;
2443 			goto out;
2444 		}
2445 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
2446 			int err = insert_page_in_batch_locked(vma, pte,
2447 				addr, pages[curr_page_idx], prot);
2448 			if (unlikely(err)) {
2449 				pte_unmap_unlock(start_pte, pte_lock);
2450 				ret = err;
2451 				remaining_pages_total -= pte_idx;
2452 				goto out;
2453 			}
2454 			addr += PAGE_SIZE;
2455 			++curr_page_idx;
2456 		}
2457 		pte_unmap_unlock(start_pte, pte_lock);
2458 		pages_to_write_in_pmd -= batch_size;
2459 		remaining_pages_total -= batch_size;
2460 	}
2461 	if (remaining_pages_total)
2462 		goto more;
2463 	ret = 0;
2464 out:
2465 	*num = remaining_pages_total;
2466 	return ret;
2467 }
2468 
2469 /**
2470  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2471  * @vma: user vma to map to
2472  * @addr: target start user address of these pages
2473  * @pages: source kernel pages
2474  * @num: in: number of pages to map. out: number of pages that were *not*
2475  * mapped. (0 means all pages were successfully mapped).
2476  *
2477  * Preferred over vm_insert_page() when inserting multiple pages.
2478  *
2479  * In case of error, we may have mapped a subset of the provided
2480  * pages. It is the caller's responsibility to account for this case.
2481  *
2482  * The same restrictions apply as in vm_insert_page().
2483  */
2484 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2485 			struct page **pages, unsigned long *num)
2486 {
2487 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
2488 
2489 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
2490 		return -EFAULT;
2491 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2492 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2493 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2494 		vm_flags_set(vma, VM_MIXEDMAP);
2495 	}
2496 	/* Defer page refcount checking till we're about to map that page. */
2497 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
2498 }
2499 EXPORT_SYMBOL(vm_insert_pages);
2500 
2501 /**
2502  * vm_insert_page - insert single page into user vma
2503  * @vma: user vma to map to
2504  * @addr: target user address of this page
2505  * @page: source kernel page
2506  *
2507  * This allows drivers to insert individual pages they've allocated
2508  * into a user vma. The zeropage is supported in some VMAs,
2509  * see vm_mixed_zeropage_allowed().
2510  *
2511  * The page has to be a nice clean _individual_ kernel allocation.
2512  * If you allocate a compound page, you need to have marked it as
2513  * such (__GFP_COMP), or manually just split the page up yourself
2514  * (see split_page()).
2515  *
2516  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2517  * took an arbitrary page protection parameter. This doesn't allow
2518  * that. Your vma protection will have to be set up correctly, which
2519  * means that if you want a shared writable mapping, you'd better
2520  * ask for a shared writable mapping!
2521  *
2522  * The page does not need to be reserved.
2523  *
2524  * Usually this function is called from f_op->mmap() handler
2525  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2526  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2527  * function from other places, for example from page-fault handler.
2528  *
2529  * Return: %0 on success, negative error code otherwise.
2530  */
2531 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2532 			struct page *page)
2533 {
2534 	if (addr < vma->vm_start || addr >= vma->vm_end)
2535 		return -EFAULT;
2536 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2537 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2538 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2539 		vm_flags_set(vma, VM_MIXEDMAP);
2540 	}
2541 	return insert_page(vma, addr, page, vma->vm_page_prot, false);
2542 }
2543 EXPORT_SYMBOL(vm_insert_page);
2544 
2545 /*
2546  * __vm_map_pages - maps range of kernel pages into user vma
2547  * @vma: user vma to map to
2548  * @pages: pointer to array of source kernel pages
2549  * @num: number of pages in page array
2550  * @offset: user's requested vm_pgoff
2551  *
2552  * This allows drivers to map range of kernel pages into a user vma.
2553  * The zeropage is supported in some VMAs, see
2554  * vm_mixed_zeropage_allowed().
2555  *
2556  * Return: 0 on success and error code otherwise.
2557  */
2558 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2559 				unsigned long num, unsigned long offset)
2560 {
2561 	unsigned long count = vma_pages(vma);
2562 	unsigned long uaddr = vma->vm_start;
2563 
2564 	/* Fail if the user requested offset is beyond the end of the object */
2565 	if (offset >= num)
2566 		return -ENXIO;
2567 
2568 	/* Fail if the user requested size exceeds available object size */
2569 	if (count > num - offset)
2570 		return -ENXIO;
2571 
2572 	return vm_insert_pages(vma, uaddr, pages + offset, &count);
2573 }
2574 
2575 /**
2576  * vm_map_pages - maps range of kernel pages starts with non zero offset
2577  * @vma: user vma to map to
2578  * @pages: pointer to array of source kernel pages
2579  * @num: number of pages in page array
2580  *
2581  * Maps an object consisting of @num pages, catering for the user's
2582  * requested vm_pgoff
2583  *
2584  * If we fail to insert any page into the vma, the function will return
2585  * immediately leaving any previously inserted pages present.  Callers
2586  * from the mmap handler may immediately return the error as their caller
2587  * will destroy the vma, removing any successfully inserted pages. Other
2588  * callers should make their own arrangements for calling unmap_region().
2589  *
2590  * Context: Process context. Called by mmap handlers.
2591  * Return: 0 on success and error code otherwise.
2592  */
2593 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2594 				unsigned long num)
2595 {
2596 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2597 }
2598 EXPORT_SYMBOL(vm_map_pages);
2599 
2600 /**
2601  * vm_map_pages_zero - map range of kernel pages starts with zero offset
2602  * @vma: user vma to map to
2603  * @pages: pointer to array of source kernel pages
2604  * @num: number of pages in page array
2605  *
2606  * Similar to vm_map_pages(), except that it explicitly sets the offset
2607  * to 0. This function is intended for the drivers that did not consider
2608  * vm_pgoff.
2609  *
2610  * Context: Process context. Called by mmap handlers.
2611  * Return: 0 on success and error code otherwise.
2612  */
2613 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2614 				unsigned long num)
2615 {
2616 	return __vm_map_pages(vma, pages, num, 0);
2617 }
2618 EXPORT_SYMBOL(vm_map_pages_zero);
2619 
2620 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2621 			unsigned long pfn, pgprot_t prot, bool mkwrite)
2622 {
2623 	struct mm_struct *mm = vma->vm_mm;
2624 	pte_t *pte, entry;
2625 	spinlock_t *ptl;
2626 
2627 	pte = get_locked_pte(mm, addr, &ptl);
2628 	if (!pte)
2629 		return VM_FAULT_OOM;
2630 	entry = ptep_get(pte);
2631 	if (!pte_none(entry)) {
2632 		if (mkwrite) {
2633 			/*
2634 			 * For read faults on private mappings the PFN passed
2635 			 * in may not match the PFN we have mapped if the
2636 			 * mapped PFN is a writeable COW page.  In the mkwrite
2637 			 * case we are creating a writable PTE for a shared
2638 			 * mapping and we expect the PFNs to match. If they
2639 			 * don't match, we are likely racing with block
2640 			 * allocation and mapping invalidation so just skip the
2641 			 * update.
2642 			 */
2643 			if (pte_pfn(entry) != pfn) {
2644 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2645 				goto out_unlock;
2646 			}
2647 			entry = pte_mkyoung(entry);
2648 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2649 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2650 				update_mmu_cache(vma, addr, pte);
2651 		}
2652 		goto out_unlock;
2653 	}
2654 
2655 	/* Ok, finally just insert the thing.. */
2656 	entry = pte_mkspecial(pfn_pte(pfn, prot));
2657 
2658 	if (mkwrite) {
2659 		entry = pte_mkyoung(entry);
2660 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2661 	}
2662 
2663 	set_pte_at(mm, addr, pte, entry);
2664 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2665 
2666 out_unlock:
2667 	pte_unmap_unlock(pte, ptl);
2668 	return VM_FAULT_NOPAGE;
2669 }
2670 
2671 /**
2672  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2673  * @vma: user vma to map to
2674  * @addr: target user address of this page
2675  * @pfn: source kernel pfn
2676  * @pgprot: pgprot flags for the inserted page
2677  *
2678  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2679  * to override pgprot on a per-page basis.
2680  *
2681  * This only makes sense for IO mappings, and it makes no sense for
2682  * COW mappings.  In general, using multiple vmas is preferable;
2683  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2684  * impractical.
2685  *
2686  * pgprot typically only differs from @vma->vm_page_prot when drivers set
2687  * caching- and encryption bits different than those of @vma->vm_page_prot,
2688  * because the caching- or encryption mode may not be known at mmap() time.
2689  *
2690  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2691  * to set caching and encryption bits for those vmas (except for COW pages).
2692  * This is ensured by core vm only modifying these page table entries using
2693  * functions that don't touch caching- or encryption bits, using pte_modify()
2694  * if needed. (See for example mprotect()).
2695  *
2696  * Also when new page-table entries are created, this is only done using the
2697  * fault() callback, and never using the value of vma->vm_page_prot,
2698  * except for page-table entries that point to anonymous pages as the result
2699  * of COW.
2700  *
2701  * Context: Process context.  May allocate using %GFP_KERNEL.
2702  * Return: vm_fault_t value.
2703  */
2704 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2705 			unsigned long pfn, pgprot_t pgprot)
2706 {
2707 	/*
2708 	 * Technically, architectures with pte_special can avoid all these
2709 	 * restrictions (same for remap_pfn_range).  However we would like
2710 	 * consistency in testing and feature parity among all, so we should
2711 	 * try to keep these invariants in place for everybody.
2712 	 */
2713 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2714 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2715 						(VM_PFNMAP|VM_MIXEDMAP));
2716 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2717 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2718 
2719 	if (addr < vma->vm_start || addr >= vma->vm_end)
2720 		return VM_FAULT_SIGBUS;
2721 
2722 	if (!pfn_modify_allowed(pfn, pgprot))
2723 		return VM_FAULT_SIGBUS;
2724 
2725 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
2726 
2727 	return insert_pfn(vma, addr, pfn, pgprot, false);
2728 }
2729 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2730 
2731 /**
2732  * vmf_insert_pfn - insert single pfn into user vma
2733  * @vma: user vma to map to
2734  * @addr: target user address of this page
2735  * @pfn: source kernel pfn
2736  *
2737  * Similar to vm_insert_page, this allows drivers to insert individual pages
2738  * they've allocated into a user vma. Same comments apply.
2739  *
2740  * This function should only be called from a vm_ops->fault handler, and
2741  * in that case the handler should return the result of this function.
2742  *
2743  * vma cannot be a COW mapping.
2744  *
2745  * As this is called only for pages that do not currently exist, we
2746  * do not need to flush old virtual caches or the TLB.
2747  *
2748  * Context: Process context.  May allocate using %GFP_KERNEL.
2749  * Return: vm_fault_t value.
2750  */
2751 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2752 			unsigned long pfn)
2753 {
2754 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2755 }
2756 EXPORT_SYMBOL(vmf_insert_pfn);
2757 
2758 static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn,
2759 			bool mkwrite)
2760 {
2761 	if (unlikely(is_zero_pfn(pfn)) &&
2762 	    (mkwrite || !vm_mixed_zeropage_allowed(vma)))
2763 		return false;
2764 	/* these checks mirror the abort conditions in vm_normal_page */
2765 	if (vma->vm_flags & VM_MIXEDMAP)
2766 		return true;
2767 	if (is_zero_pfn(pfn))
2768 		return true;
2769 	return false;
2770 }
2771 
2772 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2773 		unsigned long addr, unsigned long pfn, bool mkwrite)
2774 {
2775 	pgprot_t pgprot = vma->vm_page_prot;
2776 	int err;
2777 
2778 	if (!vm_mixed_ok(vma, pfn, mkwrite))
2779 		return VM_FAULT_SIGBUS;
2780 
2781 	if (addr < vma->vm_start || addr >= vma->vm_end)
2782 		return VM_FAULT_SIGBUS;
2783 
2784 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
2785 
2786 	if (!pfn_modify_allowed(pfn, pgprot))
2787 		return VM_FAULT_SIGBUS;
2788 
2789 	/*
2790 	 * If we don't have pte special, then we have to use the pfn_valid()
2791 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2792 	 * refcount the page if pfn_valid is true (hence insert_page rather
2793 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2794 	 * without pte special, it would there be refcounted as a normal page.
2795 	 */
2796 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_valid(pfn)) {
2797 		struct page *page;
2798 
2799 		/*
2800 		 * At this point we are committed to insert_page()
2801 		 * regardless of whether the caller specified flags that
2802 		 * result in pfn_t_has_page() == false.
2803 		 */
2804 		page = pfn_to_page(pfn);
2805 		err = insert_page(vma, addr, page, pgprot, mkwrite);
2806 	} else {
2807 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2808 	}
2809 
2810 	if (err == -ENOMEM)
2811 		return VM_FAULT_OOM;
2812 	if (err < 0 && err != -EBUSY)
2813 		return VM_FAULT_SIGBUS;
2814 
2815 	return VM_FAULT_NOPAGE;
2816 }
2817 
2818 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
2819 			bool write)
2820 {
2821 	pgprot_t pgprot = vmf->vma->vm_page_prot;
2822 	unsigned long addr = vmf->address;
2823 	int err;
2824 
2825 	if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end)
2826 		return VM_FAULT_SIGBUS;
2827 
2828 	err = insert_page(vmf->vma, addr, page, pgprot, write);
2829 	if (err == -ENOMEM)
2830 		return VM_FAULT_OOM;
2831 	if (err < 0 && err != -EBUSY)
2832 		return VM_FAULT_SIGBUS;
2833 
2834 	return VM_FAULT_NOPAGE;
2835 }
2836 EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite);
2837 
2838 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2839 		unsigned long pfn)
2840 {
2841 	return __vm_insert_mixed(vma, addr, pfn, false);
2842 }
2843 EXPORT_SYMBOL(vmf_insert_mixed);
2844 
2845 /*
2846  *  If the insertion of PTE failed because someone else already added a
2847  *  different entry in the mean time, we treat that as success as we assume
2848  *  the same entry was actually inserted.
2849  */
2850 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2851 		unsigned long addr, unsigned long pfn)
2852 {
2853 	return __vm_insert_mixed(vma, addr, pfn, true);
2854 }
2855 
2856 /*
2857  * maps a range of physical memory into the requested pages. the old
2858  * mappings are removed. any references to nonexistent pages results
2859  * in null mappings (currently treated as "copy-on-access")
2860  */
2861 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2862 			unsigned long addr, unsigned long end,
2863 			unsigned long pfn, pgprot_t prot)
2864 {
2865 	pte_t *pte, *mapped_pte;
2866 	spinlock_t *ptl;
2867 	int err = 0;
2868 
2869 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2870 	if (!pte)
2871 		return -ENOMEM;
2872 	lazy_mmu_mode_enable();
2873 	do {
2874 		BUG_ON(!pte_none(ptep_get(pte)));
2875 		if (!pfn_modify_allowed(pfn, prot)) {
2876 			err = -EACCES;
2877 			break;
2878 		}
2879 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2880 		pfn++;
2881 	} while (pte++, addr += PAGE_SIZE, addr != end);
2882 	lazy_mmu_mode_disable();
2883 	pte_unmap_unlock(mapped_pte, ptl);
2884 	return err;
2885 }
2886 
2887 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2888 			unsigned long addr, unsigned long end,
2889 			unsigned long pfn, pgprot_t prot)
2890 {
2891 	pmd_t *pmd;
2892 	unsigned long next;
2893 	int err;
2894 
2895 	pfn -= addr >> PAGE_SHIFT;
2896 	pmd = pmd_alloc(mm, pud, addr);
2897 	if (!pmd)
2898 		return -ENOMEM;
2899 	VM_BUG_ON(pmd_trans_huge(*pmd));
2900 	do {
2901 		next = pmd_addr_end(addr, end);
2902 		err = remap_pte_range(mm, pmd, addr, next,
2903 				pfn + (addr >> PAGE_SHIFT), prot);
2904 		if (err)
2905 			return err;
2906 	} while (pmd++, addr = next, addr != end);
2907 	return 0;
2908 }
2909 
2910 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2911 			unsigned long addr, unsigned long end,
2912 			unsigned long pfn, pgprot_t prot)
2913 {
2914 	pud_t *pud;
2915 	unsigned long next;
2916 	int err;
2917 
2918 	pfn -= addr >> PAGE_SHIFT;
2919 	pud = pud_alloc(mm, p4d, addr);
2920 	if (!pud)
2921 		return -ENOMEM;
2922 	do {
2923 		next = pud_addr_end(addr, end);
2924 		err = remap_pmd_range(mm, pud, addr, next,
2925 				pfn + (addr >> PAGE_SHIFT), prot);
2926 		if (err)
2927 			return err;
2928 	} while (pud++, addr = next, addr != end);
2929 	return 0;
2930 }
2931 
2932 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2933 			unsigned long addr, unsigned long end,
2934 			unsigned long pfn, pgprot_t prot)
2935 {
2936 	p4d_t *p4d;
2937 	unsigned long next;
2938 	int err;
2939 
2940 	pfn -= addr >> PAGE_SHIFT;
2941 	p4d = p4d_alloc(mm, pgd, addr);
2942 	if (!p4d)
2943 		return -ENOMEM;
2944 	do {
2945 		next = p4d_addr_end(addr, end);
2946 		err = remap_pud_range(mm, p4d, addr, next,
2947 				pfn + (addr >> PAGE_SHIFT), prot);
2948 		if (err)
2949 			return err;
2950 	} while (p4d++, addr = next, addr != end);
2951 	return 0;
2952 }
2953 
2954 static int get_remap_pgoff(bool is_cow, unsigned long addr,
2955 		unsigned long end, unsigned long vm_start, unsigned long vm_end,
2956 		unsigned long pfn, pgoff_t *vm_pgoff_p)
2957 {
2958 	/*
2959 	 * There's a horrible special case to handle copy-on-write
2960 	 * behaviour that some programs depend on. We mark the "original"
2961 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2962 	 * See vm_normal_page() for details.
2963 	 */
2964 	if (is_cow) {
2965 		if (addr != vm_start || end != vm_end)
2966 			return -EINVAL;
2967 		*vm_pgoff_p = pfn;
2968 	}
2969 
2970 	return 0;
2971 }
2972 
2973 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
2974 		unsigned long pfn, unsigned long size, pgprot_t prot)
2975 {
2976 	pgd_t *pgd;
2977 	unsigned long next;
2978 	unsigned long end = addr + PAGE_ALIGN(size);
2979 	struct mm_struct *mm = vma->vm_mm;
2980 	int err;
2981 
2982 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2983 		return -EINVAL;
2984 
2985 	VM_WARN_ON_ONCE(!vma_test_all_mask(vma, VMA_REMAP_FLAGS));
2986 
2987 	BUG_ON(addr >= end);
2988 	pfn -= addr >> PAGE_SHIFT;
2989 	pgd = pgd_offset(mm, addr);
2990 	flush_cache_range(vma, addr, end);
2991 	do {
2992 		next = pgd_addr_end(addr, end);
2993 		err = remap_p4d_range(mm, pgd, addr, next,
2994 				pfn + (addr >> PAGE_SHIFT), prot);
2995 		if (err)
2996 			return err;
2997 	} while (pgd++, addr = next, addr != end);
2998 
2999 	return 0;
3000 }
3001 
3002 /*
3003  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
3004  * must have pre-validated the caching bits of the pgprot_t.
3005  */
3006 static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3007 		unsigned long pfn, unsigned long size, pgprot_t prot)
3008 {
3009 	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
3010 
3011 	if (!error)
3012 		return 0;
3013 
3014 	/*
3015 	 * A partial pfn range mapping is dangerous: it does not
3016 	 * maintain page reference counts, and callers may free
3017 	 * pages due to the error. So zap it early.
3018 	 */
3019 	zap_vma_range(vma, addr, size);
3020 	return error;
3021 }
3022 
3023 #ifdef __HAVE_PFNMAP_TRACKING
3024 static inline struct pfnmap_track_ctx *pfnmap_track_ctx_alloc(unsigned long pfn,
3025 		unsigned long size, pgprot_t *prot)
3026 {
3027 	struct pfnmap_track_ctx *ctx;
3028 
3029 	if (pfnmap_track(pfn, size, prot))
3030 		return ERR_PTR(-EINVAL);
3031 
3032 	ctx = kmalloc_obj(*ctx);
3033 	if (unlikely(!ctx)) {
3034 		pfnmap_untrack(pfn, size);
3035 		return ERR_PTR(-ENOMEM);
3036 	}
3037 
3038 	ctx->pfn = pfn;
3039 	ctx->size = size;
3040 	kref_init(&ctx->kref);
3041 	return ctx;
3042 }
3043 
3044 void pfnmap_track_ctx_release(struct kref *ref)
3045 {
3046 	struct pfnmap_track_ctx *ctx = container_of(ref, struct pfnmap_track_ctx, kref);
3047 
3048 	pfnmap_untrack(ctx->pfn, ctx->size);
3049 	kfree(ctx);
3050 }
3051 
3052 static int remap_pfn_range_track(struct vm_area_struct *vma, unsigned long addr,
3053 		unsigned long pfn, unsigned long size, pgprot_t prot)
3054 {
3055 	struct pfnmap_track_ctx *ctx = NULL;
3056 	int err;
3057 
3058 	size = PAGE_ALIGN(size);
3059 
3060 	/*
3061 	 * If we cover the full VMA, we'll perform actual tracking, and
3062 	 * remember to untrack when the last reference to our tracking
3063 	 * context from a VMA goes away. We'll keep tracking the whole pfn
3064 	 * range even during VMA splits and partial unmapping.
3065 	 *
3066 	 * If we only cover parts of the VMA, we'll only setup the cachemode
3067 	 * in the pgprot for the pfn range.
3068 	 */
3069 	if (addr == vma->vm_start && addr + size == vma->vm_end) {
3070 		if (vma->pfnmap_track_ctx)
3071 			return -EINVAL;
3072 		ctx = pfnmap_track_ctx_alloc(pfn, size, &prot);
3073 		if (IS_ERR(ctx))
3074 			return PTR_ERR(ctx);
3075 	} else if (pfnmap_setup_cachemode(pfn, size, &prot)) {
3076 		return -EINVAL;
3077 	}
3078 
3079 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
3080 	if (ctx) {
3081 		if (err)
3082 			kref_put(&ctx->kref, pfnmap_track_ctx_release);
3083 		else
3084 			vma->pfnmap_track_ctx = ctx;
3085 	}
3086 	return err;
3087 }
3088 
3089 static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
3090 		unsigned long pfn, unsigned long size, pgprot_t prot)
3091 {
3092 	return remap_pfn_range_track(vma, addr, pfn, size, prot);
3093 }
3094 #else
3095 static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
3096 		unsigned long pfn, unsigned long size, pgprot_t prot)
3097 {
3098 	return remap_pfn_range_notrack(vma, addr, pfn, size, prot);
3099 }
3100 #endif
3101 
3102 void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
3103 {
3104 	/*
3105 	 * We set addr=VMA start, end=VMA end here, so this won't fail, but we
3106 	 * check it again on complete and will fail there if specified addr is
3107 	 * invalid.
3108 	 */
3109 	get_remap_pgoff(vma_desc_is_cow_mapping(desc), desc->start, desc->end,
3110 			desc->start, desc->end, pfn, &desc->pgoff);
3111 	vma_desc_set_flags_mask(desc, VMA_REMAP_FLAGS);
3112 }
3113 
3114 static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long addr,
3115 		unsigned long pfn, unsigned long size)
3116 {
3117 	unsigned long end = addr + PAGE_ALIGN(size);
3118 	int err;
3119 
3120 	err = get_remap_pgoff(is_cow_mapping(vma->vm_flags), addr, end,
3121 			      vma->vm_start, vma->vm_end, pfn, &vma->vm_pgoff);
3122 	if (err)
3123 		return err;
3124 
3125 	vma_set_flags_mask(vma, VMA_REMAP_FLAGS);
3126 	return 0;
3127 }
3128 
3129 /**
3130  * remap_pfn_range - remap kernel memory to userspace
3131  * @vma: user vma to map to
3132  * @addr: target page aligned user address to start at
3133  * @pfn: page frame number of kernel physical memory address
3134  * @size: size of mapping area
3135  * @prot: page protection flags for this mapping
3136  *
3137  * Note: this is only safe if the mm semaphore is held when called.
3138  *
3139  * Return: %0 on success, negative error code otherwise.
3140  */
3141 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
3142 		    unsigned long pfn, unsigned long size, pgprot_t prot)
3143 {
3144 	int err;
3145 
3146 	err = remap_pfn_range_prepare_vma(vma, addr, pfn, size);
3147 	if (err)
3148 		return err;
3149 
3150 	return do_remap_pfn_range(vma, addr, pfn, size, prot);
3151 }
3152 EXPORT_SYMBOL(remap_pfn_range);
3153 
3154 int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
3155 		unsigned long pfn, unsigned long size, pgprot_t prot)
3156 {
3157 	return do_remap_pfn_range(vma, addr, pfn, size, prot);
3158 }
3159 
3160 /**
3161  * vm_iomap_memory - remap memory to userspace
3162  * @vma: user vma to map to
3163  * @start: start of the physical memory to be mapped
3164  * @len: size of area
3165  *
3166  * This is a simplified io_remap_pfn_range() for common driver use. The
3167  * driver just needs to give us the physical memory range to be mapped,
3168  * we'll figure out the rest from the vma information.
3169  *
3170  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
3171  * whatever write-combining details or similar.
3172  *
3173  * Return: %0 on success, negative error code otherwise.
3174  */
3175 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
3176 {
3177 	unsigned long vm_len, pfn, pages;
3178 
3179 	/* Check that the physical memory area passed in looks valid */
3180 	if (start + len < start)
3181 		return -EINVAL;
3182 	/*
3183 	 * You *really* shouldn't map things that aren't page-aligned,
3184 	 * but we've historically allowed it because IO memory might
3185 	 * just have smaller alignment.
3186 	 */
3187 	len += start & ~PAGE_MASK;
3188 	pfn = start >> PAGE_SHIFT;
3189 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
3190 	if (pfn + pages < pfn)
3191 		return -EINVAL;
3192 
3193 	/* We start the mapping 'vm_pgoff' pages into the area */
3194 	if (vma->vm_pgoff > pages)
3195 		return -EINVAL;
3196 	pfn += vma->vm_pgoff;
3197 	pages -= vma->vm_pgoff;
3198 
3199 	/* Can we fit all of the mapping? */
3200 	vm_len = vma->vm_end - vma->vm_start;
3201 	if (vm_len >> PAGE_SHIFT > pages)
3202 		return -EINVAL;
3203 
3204 	/* Ok, let it rip */
3205 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
3206 }
3207 EXPORT_SYMBOL(vm_iomap_memory);
3208 
3209 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
3210 				     unsigned long addr, unsigned long end,
3211 				     pte_fn_t fn, void *data, bool create,
3212 				     pgtbl_mod_mask *mask)
3213 {
3214 	pte_t *pte, *mapped_pte;
3215 	int err = 0;
3216 	spinlock_t *ptl;
3217 
3218 	if (create) {
3219 		mapped_pte = pte = (mm == &init_mm) ?
3220 			pte_alloc_kernel_track(pmd, addr, mask) :
3221 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
3222 		if (!pte)
3223 			return -ENOMEM;
3224 	} else {
3225 		mapped_pte = pte = (mm == &init_mm) ?
3226 			pte_offset_kernel(pmd, addr) :
3227 			pte_offset_map_lock(mm, pmd, addr, &ptl);
3228 		if (!pte)
3229 			return -EINVAL;
3230 	}
3231 
3232 	lazy_mmu_mode_enable();
3233 
3234 	if (fn) {
3235 		do {
3236 			if (create || !pte_none(ptep_get(pte))) {
3237 				err = fn(pte, addr, data);
3238 				if (err)
3239 					break;
3240 			}
3241 		} while (pte++, addr += PAGE_SIZE, addr != end);
3242 	}
3243 	*mask |= PGTBL_PTE_MODIFIED;
3244 
3245 	lazy_mmu_mode_disable();
3246 
3247 	if (mm != &init_mm)
3248 		pte_unmap_unlock(mapped_pte, ptl);
3249 	return err;
3250 }
3251 
3252 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
3253 				     unsigned long addr, unsigned long end,
3254 				     pte_fn_t fn, void *data, bool create,
3255 				     pgtbl_mod_mask *mask)
3256 {
3257 	pmd_t *pmd;
3258 	unsigned long next;
3259 	int err = 0;
3260 
3261 	BUG_ON(pud_leaf(*pud));
3262 
3263 	if (create) {
3264 		pmd = pmd_alloc_track(mm, pud, addr, mask);
3265 		if (!pmd)
3266 			return -ENOMEM;
3267 	} else {
3268 		pmd = pmd_offset(pud, addr);
3269 	}
3270 	do {
3271 		next = pmd_addr_end(addr, end);
3272 		if (pmd_none(*pmd) && !create)
3273 			continue;
3274 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
3275 			return -EINVAL;
3276 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
3277 			if (!create)
3278 				continue;
3279 			pmd_clear_bad(pmd);
3280 		}
3281 		err = apply_to_pte_range(mm, pmd, addr, next,
3282 					 fn, data, create, mask);
3283 		if (err)
3284 			break;
3285 	} while (pmd++, addr = next, addr != end);
3286 
3287 	return err;
3288 }
3289 
3290 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
3291 				     unsigned long addr, unsigned long end,
3292 				     pte_fn_t fn, void *data, bool create,
3293 				     pgtbl_mod_mask *mask)
3294 {
3295 	pud_t *pud;
3296 	unsigned long next;
3297 	int err = 0;
3298 
3299 	if (create) {
3300 		pud = pud_alloc_track(mm, p4d, addr, mask);
3301 		if (!pud)
3302 			return -ENOMEM;
3303 	} else {
3304 		pud = pud_offset(p4d, addr);
3305 	}
3306 	do {
3307 		next = pud_addr_end(addr, end);
3308 		if (pud_none(*pud) && !create)
3309 			continue;
3310 		if (WARN_ON_ONCE(pud_leaf(*pud)))
3311 			return -EINVAL;
3312 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
3313 			if (!create)
3314 				continue;
3315 			pud_clear_bad(pud);
3316 		}
3317 		err = apply_to_pmd_range(mm, pud, addr, next,
3318 					 fn, data, create, mask);
3319 		if (err)
3320 			break;
3321 	} while (pud++, addr = next, addr != end);
3322 
3323 	return err;
3324 }
3325 
3326 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
3327 				     unsigned long addr, unsigned long end,
3328 				     pte_fn_t fn, void *data, bool create,
3329 				     pgtbl_mod_mask *mask)
3330 {
3331 	p4d_t *p4d;
3332 	unsigned long next;
3333 	int err = 0;
3334 
3335 	if (create) {
3336 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
3337 		if (!p4d)
3338 			return -ENOMEM;
3339 	} else {
3340 		p4d = p4d_offset(pgd, addr);
3341 	}
3342 	do {
3343 		next = p4d_addr_end(addr, end);
3344 		if (p4d_none(*p4d) && !create)
3345 			continue;
3346 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
3347 			return -EINVAL;
3348 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
3349 			if (!create)
3350 				continue;
3351 			p4d_clear_bad(p4d);
3352 		}
3353 		err = apply_to_pud_range(mm, p4d, addr, next,
3354 					 fn, data, create, mask);
3355 		if (err)
3356 			break;
3357 	} while (p4d++, addr = next, addr != end);
3358 
3359 	return err;
3360 }
3361 
3362 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3363 				 unsigned long size, pte_fn_t fn,
3364 				 void *data, bool create)
3365 {
3366 	pgd_t *pgd;
3367 	unsigned long start = addr, next;
3368 	unsigned long end = addr + size;
3369 	pgtbl_mod_mask mask = 0;
3370 	int err = 0;
3371 
3372 	if (WARN_ON(addr >= end))
3373 		return -EINVAL;
3374 
3375 	pgd = pgd_offset(mm, addr);
3376 	do {
3377 		next = pgd_addr_end(addr, end);
3378 		if (pgd_none(*pgd) && !create)
3379 			continue;
3380 		if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
3381 			err = -EINVAL;
3382 			break;
3383 		}
3384 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
3385 			if (!create)
3386 				continue;
3387 			pgd_clear_bad(pgd);
3388 		}
3389 		err = apply_to_p4d_range(mm, pgd, addr, next,
3390 					 fn, data, create, &mask);
3391 		if (err)
3392 			break;
3393 	} while (pgd++, addr = next, addr != end);
3394 
3395 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
3396 		arch_sync_kernel_mappings(start, start + size);
3397 
3398 	return err;
3399 }
3400 
3401 /*
3402  * Scan a region of virtual memory, filling in page tables as necessary
3403  * and calling a provided function on each leaf page table.
3404  */
3405 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3406 			unsigned long size, pte_fn_t fn, void *data)
3407 {
3408 	return __apply_to_page_range(mm, addr, size, fn, data, true);
3409 }
3410 EXPORT_SYMBOL_GPL(apply_to_page_range);
3411 
3412 /*
3413  * Scan a region of virtual memory, calling a provided function on
3414  * each leaf page table where it exists.
3415  *
3416  * Unlike apply_to_page_range, this does _not_ fill in page tables
3417  * where they are absent.
3418  */
3419 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
3420 				 unsigned long size, pte_fn_t fn, void *data)
3421 {
3422 	return __apply_to_page_range(mm, addr, size, fn, data, false);
3423 }
3424 
3425 /*
3426  * handle_pte_fault chooses page fault handler according to an entry which was
3427  * read non-atomically.  Before making any commitment, on those architectures
3428  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3429  * parts, do_swap_page must check under lock before unmapping the pte and
3430  * proceeding (but do_wp_page is only called after already making such a check;
3431  * and do_anonymous_page can safely check later on).
3432  */
3433 static inline int pte_unmap_same(struct vm_fault *vmf)
3434 {
3435 	int same = 1;
3436 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
3437 	if (sizeof(pte_t) > sizeof(unsigned long)) {
3438 		spin_lock(vmf->ptl);
3439 		same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
3440 		spin_unlock(vmf->ptl);
3441 	}
3442 #endif
3443 	pte_unmap(vmf->pte);
3444 	vmf->pte = NULL;
3445 	return same;
3446 }
3447 
3448 /*
3449  * Return:
3450  *	0:		copied succeeded
3451  *	-EHWPOISON:	copy failed due to hwpoison in source page
3452  *	-EAGAIN:	copied failed (some other reason)
3453  */
3454 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
3455 				      struct vm_fault *vmf)
3456 {
3457 	int ret;
3458 	void *kaddr;
3459 	void __user *uaddr;
3460 	struct vm_area_struct *vma = vmf->vma;
3461 	struct mm_struct *mm = vma->vm_mm;
3462 	unsigned long addr = vmf->address;
3463 
3464 	if (likely(src)) {
3465 		if (copy_mc_user_highpage(dst, src, addr, vma))
3466 			return -EHWPOISON;
3467 		return 0;
3468 	}
3469 
3470 	/*
3471 	 * If the source page was a PFN mapping, we don't have
3472 	 * a "struct page" for it. We do a best-effort copy by
3473 	 * just copying from the original user address. If that
3474 	 * fails, we just zero-fill it. Live with it.
3475 	 */
3476 	kaddr = kmap_local_page(dst);
3477 	pagefault_disable();
3478 	uaddr = (void __user *)(addr & PAGE_MASK);
3479 
3480 	/*
3481 	 * On architectures with software "accessed" bits, we would
3482 	 * take a double page fault, so mark it accessed here.
3483 	 */
3484 	vmf->pte = NULL;
3485 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
3486 		pte_t entry;
3487 
3488 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3489 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3490 			/*
3491 			 * Other thread has already handled the fault
3492 			 * and update local tlb only
3493 			 */
3494 			if (vmf->pte)
3495 				update_mmu_tlb(vma, addr, vmf->pte);
3496 			ret = -EAGAIN;
3497 			goto pte_unlock;
3498 		}
3499 
3500 		entry = pte_mkyoung(vmf->orig_pte);
3501 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3502 			update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3503 	}
3504 
3505 	/*
3506 	 * This really shouldn't fail, because the page is there
3507 	 * in the page tables. But it might just be unreadable,
3508 	 * in which case we just give up and fill the result with
3509 	 * zeroes.
3510 	 */
3511 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3512 		if (vmf->pte)
3513 			goto warn;
3514 
3515 		/* Re-validate under PTL if the page is still mapped */
3516 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3517 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3518 			/* The PTE changed under us, update local tlb */
3519 			if (vmf->pte)
3520 				update_mmu_tlb(vma, addr, vmf->pte);
3521 			ret = -EAGAIN;
3522 			goto pte_unlock;
3523 		}
3524 
3525 		/*
3526 		 * The same page can be mapped back since last copy attempt.
3527 		 * Try to copy again under PTL.
3528 		 */
3529 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3530 			/*
3531 			 * Give a warn in case there can be some obscure
3532 			 * use-case
3533 			 */
3534 warn:
3535 			WARN_ON_ONCE(1);
3536 			clear_page(kaddr);
3537 		}
3538 	}
3539 
3540 	ret = 0;
3541 
3542 pte_unlock:
3543 	if (vmf->pte)
3544 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3545 	pagefault_enable();
3546 	kunmap_local(kaddr);
3547 	flush_dcache_page(dst);
3548 
3549 	return ret;
3550 }
3551 
3552 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3553 {
3554 	struct file *vm_file = vma->vm_file;
3555 
3556 	if (vm_file)
3557 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3558 
3559 	/*
3560 	 * Special mappings (e.g. VDSO) do not have any file so fake
3561 	 * a default GFP_KERNEL for them.
3562 	 */
3563 	return GFP_KERNEL;
3564 }
3565 
3566 /*
3567  * Notify the address space that the page is about to become writable so that
3568  * it can prohibit this or wait for the page to get into an appropriate state.
3569  *
3570  * We do this without the lock held, so that it can sleep if it needs to.
3571  */
3572 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3573 {
3574 	vm_fault_t ret;
3575 	unsigned int old_flags = vmf->flags;
3576 
3577 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3578 
3579 	if (vmf->vma->vm_file &&
3580 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3581 		return VM_FAULT_SIGBUS;
3582 
3583 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3584 	/* Restore original flags so that caller is not surprised */
3585 	vmf->flags = old_flags;
3586 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3587 		return ret;
3588 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3589 		folio_lock(folio);
3590 		if (!folio->mapping) {
3591 			folio_unlock(folio);
3592 			return 0; /* retry */
3593 		}
3594 		ret |= VM_FAULT_LOCKED;
3595 	} else
3596 		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3597 	return ret;
3598 }
3599 
3600 /*
3601  * Handle dirtying of a page in shared file mapping on a write fault.
3602  *
3603  * The function expects the page to be locked and unlocks it.
3604  */
3605 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3606 {
3607 	struct vm_area_struct *vma = vmf->vma;
3608 	struct address_space *mapping;
3609 	struct folio *folio = page_folio(vmf->page);
3610 	bool dirtied;
3611 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3612 
3613 	dirtied = folio_mark_dirty(folio);
3614 	VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3615 	/*
3616 	 * Take a local copy of the address_space - folio.mapping may be zeroed
3617 	 * by truncate after folio_unlock().   The address_space itself remains
3618 	 * pinned by vma->vm_file's reference.  We rely on folio_unlock()'s
3619 	 * release semantics to prevent the compiler from undoing this copying.
3620 	 */
3621 	mapping = folio_raw_mapping(folio);
3622 	folio_unlock(folio);
3623 
3624 	if (!page_mkwrite)
3625 		file_update_time(vma->vm_file);
3626 
3627 	/*
3628 	 * Throttle page dirtying rate down to writeback speed.
3629 	 *
3630 	 * mapping may be NULL here because some device drivers do not
3631 	 * set page.mapping but still dirty their pages
3632 	 *
3633 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3634 	 * is pinning the mapping, as per above.
3635 	 */
3636 	if ((dirtied || page_mkwrite) && mapping) {
3637 		struct file *fpin;
3638 
3639 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3640 		balance_dirty_pages_ratelimited(mapping);
3641 		if (fpin) {
3642 			fput(fpin);
3643 			return VM_FAULT_COMPLETED;
3644 		}
3645 	}
3646 
3647 	return 0;
3648 }
3649 
3650 /*
3651  * Handle write page faults for pages that can be reused in the current vma
3652  *
3653  * This can happen either due to the mapping being with the VM_SHARED flag,
3654  * or due to us being the last reference standing to the page. In either
3655  * case, all we need to do here is to mark the page as writable and update
3656  * any related book-keeping.
3657  */
3658 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3659 	__releases(vmf->ptl)
3660 {
3661 	struct vm_area_struct *vma = vmf->vma;
3662 	pte_t entry;
3663 
3664 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3665 	VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
3666 
3667 	if (folio) {
3668 		VM_BUG_ON(folio_test_anon(folio) &&
3669 			  !PageAnonExclusive(vmf->page));
3670 		/*
3671 		 * Clear the folio's cpupid information as the existing
3672 		 * information potentially belongs to a now completely
3673 		 * unrelated process.
3674 		 */
3675 		folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3676 	}
3677 
3678 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3679 	entry = pte_mkyoung(vmf->orig_pte);
3680 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3681 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3682 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3683 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3684 	count_vm_event(PGREUSE);
3685 }
3686 
3687 /*
3688  * We could add a bitflag somewhere, but for now, we know that all
3689  * vm_ops that have a ->map_pages have been audited and don't need
3690  * the mmap_lock to be held.
3691  */
3692 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3693 {
3694 	struct vm_area_struct *vma = vmf->vma;
3695 
3696 	if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3697 		return 0;
3698 	vma_end_read(vma);
3699 	return VM_FAULT_RETRY;
3700 }
3701 
3702 /**
3703  * __vmf_anon_prepare - Prepare to handle an anonymous fault.
3704  * @vmf: The vm_fault descriptor passed from the fault handler.
3705  *
3706  * When preparing to insert an anonymous page into a VMA from a
3707  * fault handler, call this function rather than anon_vma_prepare().
3708  * If this vma does not already have an associated anon_vma and we are
3709  * only protected by the per-VMA lock, the caller must retry with the
3710  * mmap_lock held.  __anon_vma_prepare() will look at adjacent VMAs to
3711  * determine if this VMA can share its anon_vma, and that's not safe to
3712  * do with only the per-VMA lock held for this VMA.
3713  *
3714  * Return: 0 if fault handling can proceed.  Any other value should be
3715  * returned to the caller.
3716  */
3717 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
3718 {
3719 	struct vm_area_struct *vma = vmf->vma;
3720 	vm_fault_t ret = 0;
3721 
3722 	if (likely(vma->anon_vma))
3723 		return 0;
3724 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3725 		if (!mmap_read_trylock(vma->vm_mm))
3726 			return VM_FAULT_RETRY;
3727 	}
3728 	if (__anon_vma_prepare(vma))
3729 		ret = VM_FAULT_OOM;
3730 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3731 		mmap_read_unlock(vma->vm_mm);
3732 	return ret;
3733 }
3734 
3735 /*
3736  * Handle the case of a page which we actually need to copy to a new page,
3737  * either due to COW or unsharing.
3738  *
3739  * Called with mmap_lock locked and the old page referenced, but
3740  * without the ptl held.
3741  *
3742  * High level logic flow:
3743  *
3744  * - Allocate a page, copy the content of the old page to the new one.
3745  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3746  * - Take the PTL. If the pte changed, bail out and release the allocated page
3747  * - If the pte is still the way we remember it, update the page table and all
3748  *   relevant references. This includes dropping the reference the page-table
3749  *   held to the old page, as well as updating the rmap.
3750  * - In any case, unlock the PTL and drop the reference we took to the old page.
3751  */
3752 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3753 {
3754 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3755 	struct vm_area_struct *vma = vmf->vma;
3756 	struct mm_struct *mm = vma->vm_mm;
3757 	struct folio *old_folio = NULL;
3758 	struct folio *new_folio = NULL;
3759 	pte_t entry;
3760 	int page_copied = 0;
3761 	struct mmu_notifier_range range;
3762 	vm_fault_t ret;
3763 	bool pfn_is_zero;
3764 
3765 	delayacct_wpcopy_start();
3766 
3767 	if (vmf->page)
3768 		old_folio = page_folio(vmf->page);
3769 	ret = vmf_anon_prepare(vmf);
3770 	if (unlikely(ret))
3771 		goto out;
3772 
3773 	pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3774 	new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3775 	if (!new_folio)
3776 		goto oom;
3777 
3778 	if (!pfn_is_zero) {
3779 		int err;
3780 
3781 		err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3782 		if (err) {
3783 			/*
3784 			 * COW failed, if the fault was solved by other,
3785 			 * it's fine. If not, userspace would re-fault on
3786 			 * the same address and we will handle the fault
3787 			 * from the second attempt.
3788 			 * The -EHWPOISON case will not be retried.
3789 			 */
3790 			folio_put(new_folio);
3791 			if (old_folio)
3792 				folio_put(old_folio);
3793 
3794 			delayacct_wpcopy_end();
3795 			return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3796 		}
3797 		kmsan_copy_page_meta(&new_folio->page, vmf->page);
3798 	}
3799 
3800 	__folio_mark_uptodate(new_folio);
3801 
3802 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3803 				vmf->address & PAGE_MASK,
3804 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3805 	mmu_notifier_invalidate_range_start(&range);
3806 
3807 	/*
3808 	 * Re-check the pte - we dropped the lock
3809 	 */
3810 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3811 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3812 		if (old_folio) {
3813 			if (!folio_test_anon(old_folio)) {
3814 				dec_mm_counter(mm, mm_counter_file(old_folio));
3815 				inc_mm_counter(mm, MM_ANONPAGES);
3816 			}
3817 		} else {
3818 			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3819 			inc_mm_counter(mm, MM_ANONPAGES);
3820 		}
3821 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3822 		entry = folio_mk_pte(new_folio, vma->vm_page_prot);
3823 		entry = pte_sw_mkyoung(entry);
3824 		if (unlikely(unshare)) {
3825 			if (pte_soft_dirty(vmf->orig_pte))
3826 				entry = pte_mksoft_dirty(entry);
3827 			if (pte_uffd_wp(vmf->orig_pte))
3828 				entry = pte_mkuffd_wp(entry);
3829 		} else {
3830 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3831 		}
3832 
3833 		/*
3834 		 * Clear the pte entry and flush it first, before updating the
3835 		 * pte with the new entry, to keep TLBs on different CPUs in
3836 		 * sync. This code used to set the new PTE then flush TLBs, but
3837 		 * that left a window where the new PTE could be loaded into
3838 		 * some TLBs while the old PTE remains in others.
3839 		 */
3840 		ptep_clear_flush(vma, vmf->address, vmf->pte);
3841 		folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
3842 		folio_add_lru_vma(new_folio, vma);
3843 		BUG_ON(unshare && pte_write(entry));
3844 		set_pte_at(mm, vmf->address, vmf->pte, entry);
3845 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3846 		if (old_folio) {
3847 			/*
3848 			 * Only after switching the pte to the new page may
3849 			 * we remove the mapcount here. Otherwise another
3850 			 * process may come and find the rmap count decremented
3851 			 * before the pte is switched to the new page, and
3852 			 * "reuse" the old page writing into it while our pte
3853 			 * here still points into it and can be read by other
3854 			 * threads.
3855 			 *
3856 			 * The critical issue is to order this
3857 			 * folio_remove_rmap_pte() with the ptp_clear_flush
3858 			 * above. Those stores are ordered by (if nothing else,)
3859 			 * the barrier present in the atomic_add_negative
3860 			 * in folio_remove_rmap_pte();
3861 			 *
3862 			 * Then the TLB flush in ptep_clear_flush ensures that
3863 			 * no process can access the old page before the
3864 			 * decremented mapcount is visible. And the old page
3865 			 * cannot be reused until after the decremented
3866 			 * mapcount is visible. So transitively, TLBs to
3867 			 * old page will be flushed before it can be reused.
3868 			 */
3869 			folio_remove_rmap_pte(old_folio, vmf->page, vma);
3870 		}
3871 
3872 		/* Free the old page.. */
3873 		new_folio = old_folio;
3874 		page_copied = 1;
3875 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3876 	} else if (vmf->pte) {
3877 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3878 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3879 	}
3880 
3881 	mmu_notifier_invalidate_range_end(&range);
3882 
3883 	if (new_folio)
3884 		folio_put(new_folio);
3885 	if (old_folio) {
3886 		if (page_copied)
3887 			free_swap_cache(old_folio);
3888 		folio_put(old_folio);
3889 	}
3890 
3891 	delayacct_wpcopy_end();
3892 	return 0;
3893 oom:
3894 	ret = VM_FAULT_OOM;
3895 out:
3896 	if (old_folio)
3897 		folio_put(old_folio);
3898 
3899 	delayacct_wpcopy_end();
3900 	return ret;
3901 }
3902 
3903 /**
3904  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3905  *			  writeable once the page is prepared
3906  *
3907  * @vmf: structure describing the fault
3908  * @folio: the folio of vmf->page
3909  *
3910  * This function handles all that is needed to finish a write page fault in a
3911  * shared mapping due to PTE being read-only once the mapped page is prepared.
3912  * It handles locking of PTE and modifying it.
3913  *
3914  * The function expects the page to be locked or other protection against
3915  * concurrent faults / writeback (such as DAX radix tree locks).
3916  *
3917  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3918  * we acquired PTE lock.
3919  */
3920 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
3921 {
3922 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3923 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3924 				       &vmf->ptl);
3925 	if (!vmf->pte)
3926 		return VM_FAULT_NOPAGE;
3927 	/*
3928 	 * We might have raced with another page fault while we released the
3929 	 * pte_offset_map_lock.
3930 	 */
3931 	if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3932 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3933 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3934 		return VM_FAULT_NOPAGE;
3935 	}
3936 	wp_page_reuse(vmf, folio);
3937 	return 0;
3938 }
3939 
3940 /*
3941  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3942  * mapping
3943  */
3944 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3945 {
3946 	struct vm_area_struct *vma = vmf->vma;
3947 
3948 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3949 		vm_fault_t ret;
3950 
3951 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3952 		ret = vmf_can_call_fault(vmf);
3953 		if (ret)
3954 			return ret;
3955 
3956 		vmf->flags |= FAULT_FLAG_MKWRITE;
3957 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3958 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3959 			return ret;
3960 		return finish_mkwrite_fault(vmf, NULL);
3961 	}
3962 	wp_page_reuse(vmf, NULL);
3963 	return 0;
3964 }
3965 
3966 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3967 	__releases(vmf->ptl)
3968 {
3969 	struct vm_area_struct *vma = vmf->vma;
3970 	vm_fault_t ret = 0;
3971 
3972 	folio_get(folio);
3973 
3974 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3975 		vm_fault_t tmp;
3976 
3977 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3978 		tmp = vmf_can_call_fault(vmf);
3979 		if (tmp) {
3980 			folio_put(folio);
3981 			return tmp;
3982 		}
3983 
3984 		tmp = do_page_mkwrite(vmf, folio);
3985 		if (unlikely(!tmp || (tmp &
3986 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3987 			folio_put(folio);
3988 			return tmp;
3989 		}
3990 		tmp = finish_mkwrite_fault(vmf, folio);
3991 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3992 			folio_unlock(folio);
3993 			folio_put(folio);
3994 			return tmp;
3995 		}
3996 	} else {
3997 		wp_page_reuse(vmf, folio);
3998 		folio_lock(folio);
3999 	}
4000 	ret |= fault_dirty_shared_page(vmf);
4001 	folio_put(folio);
4002 
4003 	return ret;
4004 }
4005 
4006 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4007 static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
4008 		struct vm_area_struct *vma)
4009 {
4010 	bool exclusive = false;
4011 
4012 	/* Let's just free up a large folio if only a single page is mapped. */
4013 	if (folio_large_mapcount(folio) <= 1)
4014 		return false;
4015 
4016 	/*
4017 	 * The assumption for anonymous folios is that each page can only get
4018 	 * mapped once into each MM. The only exception are KSM folios, which
4019 	 * are always small.
4020 	 *
4021 	 * Each taken mapcount must be paired with exactly one taken reference,
4022 	 * whereby the refcount must be incremented before the mapcount when
4023 	 * mapping a page, and the refcount must be decremented after the
4024 	 * mapcount when unmapping a page.
4025 	 *
4026 	 * If all folio references are from mappings, and all mappings are in
4027 	 * the page tables of this MM, then this folio is exclusive to this MM.
4028 	 */
4029 	if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
4030 		return false;
4031 
4032 	VM_WARN_ON_ONCE(folio_test_ksm(folio));
4033 
4034 	if (unlikely(folio_test_swapcache(folio))) {
4035 		/*
4036 		 * Note: freeing up the swapcache will fail if some PTEs are
4037 		 * still swap entries.
4038 		 */
4039 		if (!folio_trylock(folio))
4040 			return false;
4041 		folio_free_swap(folio);
4042 		folio_unlock(folio);
4043 	}
4044 
4045 	if (folio_large_mapcount(folio) != folio_ref_count(folio))
4046 		return false;
4047 
4048 	/* Stabilize the mapcount vs. refcount and recheck. */
4049 	folio_lock_large_mapcount(folio);
4050 	VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_ref_count(folio), folio);
4051 
4052 	if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
4053 		goto unlock;
4054 	if (folio_large_mapcount(folio) != folio_ref_count(folio))
4055 		goto unlock;
4056 
4057 	VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_nr_pages(folio), folio);
4058 	VM_WARN_ON_ONCE_FOLIO(folio_entire_mapcount(folio), folio);
4059 	VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id &&
4060 			folio_mm_id(folio, 1) != vma->vm_mm->mm_id);
4061 
4062 	/*
4063 	 * Do we need the folio lock? Likely not. If there would have been
4064 	 * references from page migration/swapout, we would have detected
4065 	 * an additional folio reference and never ended up here.
4066 	 */
4067 	exclusive = true;
4068 unlock:
4069 	folio_unlock_large_mapcount(folio);
4070 	return exclusive;
4071 }
4072 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4073 static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
4074 		struct vm_area_struct *vma)
4075 {
4076 	BUILD_BUG();
4077 }
4078 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4079 
4080 static bool wp_can_reuse_anon_folio(struct folio *folio,
4081 				    struct vm_area_struct *vma)
4082 {
4083 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio))
4084 		return __wp_can_reuse_large_anon_folio(folio, vma);
4085 
4086 	/*
4087 	 * We have to verify under folio lock: these early checks are
4088 	 * just an optimization to avoid locking the folio and freeing
4089 	 * the swapcache if there is little hope that we can reuse.
4090 	 *
4091 	 * KSM doesn't necessarily raise the folio refcount.
4092 	 */
4093 	if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
4094 		return false;
4095 	if (!folio_test_lru(folio))
4096 		/*
4097 		 * We cannot easily detect+handle references from
4098 		 * remote LRU caches or references to LRU folios.
4099 		 */
4100 		lru_add_drain();
4101 	if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
4102 		return false;
4103 	if (!folio_trylock(folio))
4104 		return false;
4105 	if (folio_test_swapcache(folio))
4106 		folio_free_swap(folio);
4107 	if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
4108 		folio_unlock(folio);
4109 		return false;
4110 	}
4111 	/*
4112 	 * Ok, we've got the only folio reference from our mapping
4113 	 * and the folio is locked, it's dark out, and we're wearing
4114 	 * sunglasses. Hit it.
4115 	 */
4116 	folio_move_anon_rmap(folio, vma);
4117 	folio_unlock(folio);
4118 	return true;
4119 }
4120 
4121 /*
4122  * This routine handles present pages, when
4123  * * users try to write to a shared page (FAULT_FLAG_WRITE)
4124  * * GUP wants to take a R/O pin on a possibly shared anonymous page
4125  *   (FAULT_FLAG_UNSHARE)
4126  *
4127  * It is done by copying the page to a new address and decrementing the
4128  * shared-page counter for the old page.
4129  *
4130  * Note that this routine assumes that the protection checks have been
4131  * done by the caller (the low-level page fault routine in most cases).
4132  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
4133  * done any necessary COW.
4134  *
4135  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
4136  * though the page will change only once the write actually happens. This
4137  * avoids a few races, and potentially makes it more efficient.
4138  *
4139  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4140  * but allow concurrent faults), with pte both mapped and locked.
4141  * We return with mmap_lock still held, but pte unmapped and unlocked.
4142  */
4143 static vm_fault_t do_wp_page(struct vm_fault *vmf)
4144 	__releases(vmf->ptl)
4145 {
4146 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
4147 	struct vm_area_struct *vma = vmf->vma;
4148 	struct folio *folio = NULL;
4149 	pte_t pte;
4150 
4151 	if (likely(!unshare)) {
4152 		if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
4153 			if (!userfaultfd_wp_async(vma)) {
4154 				pte_unmap_unlock(vmf->pte, vmf->ptl);
4155 				return handle_userfault(vmf, VM_UFFD_WP);
4156 			}
4157 
4158 			/*
4159 			 * Nothing needed (cache flush, TLB invalidations,
4160 			 * etc.) because we're only removing the uffd-wp bit,
4161 			 * which is completely invisible to the user.
4162 			 */
4163 			pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
4164 
4165 			set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4166 			/*
4167 			 * Update this to be prepared for following up CoW
4168 			 * handling
4169 			 */
4170 			vmf->orig_pte = pte;
4171 		}
4172 
4173 		/*
4174 		 * Userfaultfd write-protect can defer flushes. Ensure the TLB
4175 		 * is flushed in this case before copying.
4176 		 */
4177 		if (unlikely(userfaultfd_wp(vmf->vma) &&
4178 			     mm_tlb_flush_pending(vmf->vma->vm_mm)))
4179 			flush_tlb_page(vmf->vma, vmf->address);
4180 	}
4181 
4182 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
4183 
4184 	if (vmf->page)
4185 		folio = page_folio(vmf->page);
4186 
4187 	/*
4188 	 * Shared mapping: we are guaranteed to have VM_WRITE and
4189 	 * FAULT_FLAG_WRITE set at this point.
4190 	 */
4191 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4192 		/*
4193 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
4194 		 * VM_PFNMAP VMA. FS DAX also wants ops->pfn_mkwrite called.
4195 		 *
4196 		 * We should not cow pages in a shared writeable mapping.
4197 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
4198 		 */
4199 		if (!vmf->page || is_fsdax_page(vmf->page)) {
4200 			vmf->page = NULL;
4201 			return wp_pfn_shared(vmf);
4202 		}
4203 		return wp_page_shared(vmf, folio);
4204 	}
4205 
4206 	/*
4207 	 * Private mapping: create an exclusive anonymous page copy if reuse
4208 	 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
4209 	 *
4210 	 * If we encounter a page that is marked exclusive, we must reuse
4211 	 * the page without further checks.
4212 	 */
4213 	if (folio && folio_test_anon(folio) &&
4214 	    (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
4215 		if (!PageAnonExclusive(vmf->page))
4216 			SetPageAnonExclusive(vmf->page);
4217 		if (unlikely(unshare)) {
4218 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4219 			return 0;
4220 		}
4221 		wp_page_reuse(vmf, folio);
4222 		return 0;
4223 	}
4224 	/*
4225 	 * Ok, we need to copy. Oh, well..
4226 	 */
4227 	if (folio)
4228 		folio_get(folio);
4229 
4230 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4231 #ifdef CONFIG_KSM
4232 	if (folio && folio_test_ksm(folio))
4233 		count_vm_event(COW_KSM);
4234 #endif
4235 	return wp_page_copy(vmf);
4236 }
4237 
4238 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
4239 					    pgoff_t first_index,
4240 					    pgoff_t last_index,
4241 					    struct zap_details *details)
4242 {
4243 	struct vm_area_struct *vma;
4244 	unsigned long start, size;
4245 	struct mmu_gather tlb;
4246 
4247 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
4248 		const pgoff_t start_idx = max(first_index, vma->vm_pgoff);
4249 		const pgoff_t end_idx = min(last_index, vma_last_pgoff(vma)) + 1;
4250 
4251 		start = vma->vm_start + ((start_idx - vma->vm_pgoff) << PAGE_SHIFT);
4252 		size = (end_idx - start_idx) << PAGE_SHIFT;
4253 
4254 		tlb_gather_mmu(&tlb, vma->vm_mm);
4255 		zap_vma_range_batched(&tlb, vma, start, size, details);
4256 		tlb_finish_mmu(&tlb);
4257 	}
4258 }
4259 
4260 /**
4261  * unmap_mapping_folio() - Unmap single folio from processes.
4262  * @folio: The locked folio to be unmapped.
4263  *
4264  * Unmap this folio from any userspace process which still has it mmaped.
4265  * Typically, for efficiency, the range of nearby pages has already been
4266  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
4267  * truncation or invalidation holds the lock on a folio, it may find that
4268  * the page has been remapped again: and then uses unmap_mapping_folio()
4269  * to unmap it finally.
4270  */
4271 void unmap_mapping_folio(struct folio *folio)
4272 {
4273 	struct address_space *mapping = folio->mapping;
4274 	struct zap_details details = { };
4275 	pgoff_t	first_index;
4276 	pgoff_t	last_index;
4277 
4278 	VM_BUG_ON(!folio_test_locked(folio));
4279 
4280 	first_index = folio->index;
4281 	last_index = folio_next_index(folio) - 1;
4282 
4283 	details.skip_cows = true;
4284 	details.single_folio = folio;
4285 	details.zap_flags = ZAP_FLAG_DROP_MARKER;
4286 
4287 	i_mmap_lock_read(mapping);
4288 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
4289 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
4290 					 last_index, &details);
4291 	i_mmap_unlock_read(mapping);
4292 }
4293 
4294 /**
4295  * unmap_mapping_pages() - Unmap pages from processes.
4296  * @mapping: The address space containing pages to be unmapped.
4297  * @start: Index of first page to be unmapped.
4298  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
4299  * @even_cows: Whether to unmap even private COWed pages.
4300  *
4301  * Unmap the pages in this address space from any userspace process which
4302  * has them mmaped.  Generally, you want to remove COWed pages as well when
4303  * a file is being truncated, but not when invalidating pages from the page
4304  * cache.
4305  */
4306 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
4307 		pgoff_t nr, bool even_cows)
4308 {
4309 	struct zap_details details = { };
4310 	pgoff_t	first_index = start;
4311 	pgoff_t	last_index = start + nr - 1;
4312 
4313 	details.skip_cows = !even_cows;
4314 	if (last_index < first_index)
4315 		last_index = ULONG_MAX;
4316 
4317 	i_mmap_lock_read(mapping);
4318 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
4319 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
4320 					 last_index, &details);
4321 	i_mmap_unlock_read(mapping);
4322 }
4323 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
4324 
4325 /**
4326  * unmap_mapping_range - unmap the portion of all mmaps in the specified
4327  * address_space corresponding to the specified byte range in the underlying
4328  * file.
4329  *
4330  * @mapping: the address space containing mmaps to be unmapped.
4331  * @holebegin: byte in first page to unmap, relative to the start of
4332  * the underlying file.  This will be rounded down to a PAGE_SIZE
4333  * boundary.  Note that this is different from truncate_pagecache(), which
4334  * must keep the partial page.  In contrast, we must get rid of
4335  * partial pages.
4336  * @holelen: size of prospective hole in bytes.  This will be rounded
4337  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
4338  * end of the file.
4339  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
4340  * but 0 when invalidating pagecache, don't throw away private data.
4341  */
4342 void unmap_mapping_range(struct address_space *mapping,
4343 		loff_t const holebegin, loff_t const holelen, int even_cows)
4344 {
4345 	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
4346 	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
4347 
4348 	/* Check for overflow. */
4349 	if (sizeof(holelen) > sizeof(hlen)) {
4350 		long long holeend =
4351 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
4352 		if (holeend & ~(long long)ULONG_MAX)
4353 			hlen = ULONG_MAX - hba + 1;
4354 	}
4355 
4356 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
4357 }
4358 EXPORT_SYMBOL(unmap_mapping_range);
4359 
4360 /*
4361  * Restore a potential device exclusive pte to a working pte entry
4362  */
4363 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
4364 {
4365 	struct folio *folio = page_folio(vmf->page);
4366 	struct vm_area_struct *vma = vmf->vma;
4367 	struct mmu_notifier_range range;
4368 	vm_fault_t ret;
4369 
4370 	/*
4371 	 * We need a reference to lock the folio because we don't hold
4372 	 * the PTL so a racing thread can remove the device-exclusive
4373 	 * entry and unmap it. If the folio is free the entry must
4374 	 * have been removed already. If it happens to have already
4375 	 * been re-allocated after being freed all we do is lock and
4376 	 * unlock it.
4377 	 */
4378 	if (!folio_try_get(folio))
4379 		return 0;
4380 
4381 	ret = folio_lock_or_retry(folio, vmf);
4382 	if (ret) {
4383 		folio_put(folio);
4384 		return ret;
4385 	}
4386 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_CLEAR, 0,
4387 				vma->vm_mm, vmf->address & PAGE_MASK,
4388 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
4389 	mmu_notifier_invalidate_range_start(&range);
4390 
4391 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4392 				&vmf->ptl);
4393 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4394 		restore_exclusive_pte(vma, folio, vmf->page, vmf->address,
4395 				      vmf->pte, vmf->orig_pte);
4396 
4397 	if (vmf->pte)
4398 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4399 	folio_unlock(folio);
4400 	folio_put(folio);
4401 
4402 	mmu_notifier_invalidate_range_end(&range);
4403 	return 0;
4404 }
4405 
4406 /*
4407  * Check if we should call folio_free_swap to free the swap cache.
4408  * folio_free_swap only frees the swap cache to release the slot if swap
4409  * count is zero, so we don't need to check the swap count here.
4410  */
4411 static inline bool should_try_to_free_swap(struct swap_info_struct *si,
4412 					   struct folio *folio,
4413 					   struct vm_area_struct *vma,
4414 					   unsigned int extra_refs,
4415 					   unsigned int fault_flags)
4416 {
4417 	if (!folio_test_swapcache(folio))
4418 		return false;
4419 	/*
4420 	 * Always try to free swap cache for SWP_SYNCHRONOUS_IO devices. Swap
4421 	 * cache can help save some IO or memory overhead, but these devices
4422 	 * are fast, and meanwhile, swap cache pinning the slot deferring the
4423 	 * release of metadata or fragmentation is a more critical issue.
4424 	 */
4425 	if (data_race(si->flags & SWP_SYNCHRONOUS_IO))
4426 		return true;
4427 	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
4428 	    folio_test_mlocked(folio))
4429 		return true;
4430 	/*
4431 	 * If we want to map a page that's in the swapcache writable, we
4432 	 * have to detect via the refcount if we're really the exclusive
4433 	 * user. Try freeing the swapcache to get rid of the swapcache
4434 	 * reference only in case it's likely that we'll be the exclusive user.
4435 	 */
4436 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
4437 		folio_ref_count(folio) == (extra_refs + folio_nr_pages(folio));
4438 }
4439 
4440 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
4441 {
4442 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4443 				       vmf->address, &vmf->ptl);
4444 	if (!vmf->pte)
4445 		return 0;
4446 	/*
4447 	 * Be careful so that we will only recover a special uffd-wp pte into a
4448 	 * none pte.  Otherwise it means the pte could have changed, so retry.
4449 	 *
4450 	 * This should also cover the case where e.g. the pte changed
4451 	 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
4452 	 * So pte_is_marker() check is not enough to safely drop the pte.
4453 	 */
4454 	if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
4455 		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
4456 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4457 	return 0;
4458 }
4459 
4460 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
4461 {
4462 	if (vma_is_anonymous(vmf->vma))
4463 		return do_anonymous_page(vmf);
4464 	else
4465 		return do_fault(vmf);
4466 }
4467 
4468 /*
4469  * This is actually a page-missing access, but with uffd-wp special pte
4470  * installed.  It means this pte was wr-protected before being unmapped.
4471  */
4472 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
4473 {
4474 	/*
4475 	 * Just in case there're leftover special ptes even after the region
4476 	 * got unregistered - we can simply clear them.
4477 	 */
4478 	if (unlikely(!userfaultfd_wp(vmf->vma)))
4479 		return pte_marker_clear(vmf);
4480 
4481 	return do_pte_missing(vmf);
4482 }
4483 
4484 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
4485 {
4486 	const softleaf_t entry = softleaf_from_pte(vmf->orig_pte);
4487 	const pte_marker marker = softleaf_to_marker(entry);
4488 
4489 	/*
4490 	 * PTE markers should never be empty.  If anything weird happened,
4491 	 * the best thing to do is to kill the process along with its mm.
4492 	 */
4493 	if (WARN_ON_ONCE(!marker))
4494 		return VM_FAULT_SIGBUS;
4495 
4496 	/* Higher priority than uffd-wp when data corrupted */
4497 	if (marker & PTE_MARKER_POISONED)
4498 		return VM_FAULT_HWPOISON;
4499 
4500 	/* Hitting a guard page is always a fatal condition. */
4501 	if (marker & PTE_MARKER_GUARD)
4502 		return VM_FAULT_SIGSEGV;
4503 
4504 	if (softleaf_is_uffd_wp_marker(entry))
4505 		return pte_marker_handle_uffd_wp(vmf);
4506 
4507 	/* This is an unknown pte marker */
4508 	return VM_FAULT_SIGBUS;
4509 }
4510 
4511 static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
4512 {
4513 	struct vm_area_struct *vma = vmf->vma;
4514 	struct folio *folio;
4515 	softleaf_t entry;
4516 
4517 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
4518 	if (!folio)
4519 		return NULL;
4520 
4521 	entry = softleaf_from_pte(vmf->orig_pte);
4522 	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4523 					   GFP_KERNEL, entry)) {
4524 		folio_put(folio);
4525 		return NULL;
4526 	}
4527 
4528 	return folio;
4529 }
4530 
4531 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4532 /*
4533  * Check if the PTEs within a range are contiguous swap entries
4534  * and have consistent swapcache, zeromap.
4535  */
4536 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
4537 {
4538 	unsigned long addr;
4539 	softleaf_t entry;
4540 	int idx;
4541 	pte_t pte;
4542 
4543 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4544 	idx = (vmf->address - addr) / PAGE_SIZE;
4545 	pte = ptep_get(ptep);
4546 
4547 	if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
4548 		return false;
4549 	entry = softleaf_from_pte(pte);
4550 	if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
4551 		return false;
4552 
4553 	/*
4554 	 * swap_read_folio() can't handle the case a large folio is hybridly
4555 	 * from different backends. And they are likely corner cases. Similar
4556 	 * things might be added once zswap support large folios.
4557 	 */
4558 	if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
4559 		return false;
4560 	if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
4561 		return false;
4562 
4563 	return true;
4564 }
4565 
4566 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
4567 						     unsigned long addr,
4568 						     unsigned long orders)
4569 {
4570 	int order, nr;
4571 
4572 	order = highest_order(orders);
4573 
4574 	/*
4575 	 * To swap in a THP with nr pages, we require that its first swap_offset
4576 	 * is aligned with that number, as it was when the THP was swapped out.
4577 	 * This helps filter out most invalid entries.
4578 	 */
4579 	while (orders) {
4580 		nr = 1 << order;
4581 		if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
4582 			break;
4583 		order = next_order(&orders, order);
4584 	}
4585 
4586 	return orders;
4587 }
4588 
4589 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4590 {
4591 	struct vm_area_struct *vma = vmf->vma;
4592 	unsigned long orders;
4593 	struct folio *folio;
4594 	unsigned long addr;
4595 	softleaf_t entry;
4596 	spinlock_t *ptl;
4597 	pte_t *pte;
4598 	gfp_t gfp;
4599 	int order;
4600 
4601 	/*
4602 	 * If uffd is active for the vma we need per-page fault fidelity to
4603 	 * maintain the uffd semantics.
4604 	 */
4605 	if (unlikely(userfaultfd_armed(vma)))
4606 		goto fallback;
4607 
4608 	/*
4609 	 * A large swapped out folio could be partially or fully in zswap. We
4610 	 * lack handling for such cases, so fallback to swapping in order-0
4611 	 * folio.
4612 	 */
4613 	if (!zswap_never_enabled())
4614 		goto fallback;
4615 
4616 	entry = softleaf_from_pte(vmf->orig_pte);
4617 	/*
4618 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4619 	 * and suitable for swapping THP.
4620 	 */
4621 	orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT,
4622 					  BIT(PMD_ORDER) - 1);
4623 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4624 	orders = thp_swap_suitable_orders(swp_offset(entry),
4625 					  vmf->address, orders);
4626 
4627 	if (!orders)
4628 		goto fallback;
4629 
4630 	pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4631 				  vmf->address & PMD_MASK, &ptl);
4632 	if (unlikely(!pte))
4633 		goto fallback;
4634 
4635 	/*
4636 	 * For do_swap_page, find the highest order where the aligned range is
4637 	 * completely swap entries with contiguous swap offsets.
4638 	 */
4639 	order = highest_order(orders);
4640 	while (orders) {
4641 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4642 		if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
4643 			break;
4644 		order = next_order(&orders, order);
4645 	}
4646 
4647 	pte_unmap_unlock(pte, ptl);
4648 
4649 	/* Try allocating the highest of the remaining orders. */
4650 	gfp = vma_thp_gfp_mask(vma);
4651 	while (orders) {
4652 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4653 		folio = vma_alloc_folio(gfp, order, vma, addr);
4654 		if (folio) {
4655 			if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4656 							    gfp, entry))
4657 				return folio;
4658 			count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
4659 			folio_put(folio);
4660 		}
4661 		count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
4662 		order = next_order(&orders, order);
4663 	}
4664 
4665 fallback:
4666 	return __alloc_swap_folio(vmf);
4667 }
4668 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4669 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4670 {
4671 	return __alloc_swap_folio(vmf);
4672 }
4673 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4674 
4675 /* Sanity check that a folio is fully exclusive */
4676 static void check_swap_exclusive(struct folio *folio, swp_entry_t entry,
4677 				 unsigned int nr_pages)
4678 {
4679 	/* Called under PT locked and folio locked, the swap count is stable */
4680 	do {
4681 		VM_WARN_ON_ONCE_FOLIO(__swap_count(entry) != 1, folio);
4682 		entry.val++;
4683 	} while (--nr_pages);
4684 }
4685 
4686 /*
4687  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4688  * but allow concurrent faults), and pte mapped but not yet locked.
4689  * We return with pte unmapped and unlocked.
4690  *
4691  * We return with the mmap_lock locked or unlocked in the same cases
4692  * as does filemap_fault().
4693  */
4694 vm_fault_t do_swap_page(struct vm_fault *vmf)
4695 {
4696 	struct vm_area_struct *vma = vmf->vma;
4697 	struct folio *swapcache = NULL, *folio;
4698 	struct page *page;
4699 	struct swap_info_struct *si = NULL;
4700 	rmap_t rmap_flags = RMAP_NONE;
4701 	bool exclusive = false;
4702 	softleaf_t entry;
4703 	pte_t pte;
4704 	vm_fault_t ret = 0;
4705 	int nr_pages;
4706 	unsigned long page_idx;
4707 	unsigned long address;
4708 	pte_t *ptep;
4709 
4710 	if (!pte_unmap_same(vmf))
4711 		goto out;
4712 
4713 	entry = softleaf_from_pte(vmf->orig_pte);
4714 	if (unlikely(!softleaf_is_swap(entry))) {
4715 		if (softleaf_is_migration(entry)) {
4716 			migration_entry_wait(vma->vm_mm, vmf->pmd,
4717 					     vmf->address);
4718 		} else if (softleaf_is_device_exclusive(entry)) {
4719 			vmf->page = softleaf_to_page(entry);
4720 			ret = remove_device_exclusive_entry(vmf);
4721 		} else if (softleaf_is_device_private(entry)) {
4722 			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4723 				/*
4724 				 * migrate_to_ram is not yet ready to operate
4725 				 * under VMA lock.
4726 				 */
4727 				vma_end_read(vma);
4728 				ret = VM_FAULT_RETRY;
4729 				goto out;
4730 			}
4731 
4732 			vmf->page = softleaf_to_page(entry);
4733 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4734 					vmf->address, &vmf->ptl);
4735 			if (unlikely(!vmf->pte ||
4736 				     !pte_same(ptep_get(vmf->pte),
4737 							vmf->orig_pte)))
4738 				goto unlock;
4739 
4740 			/*
4741 			 * Get a page reference while we know the page can't be
4742 			 * freed.
4743 			 */
4744 			if (trylock_page(vmf->page)) {
4745 				struct dev_pagemap *pgmap;
4746 
4747 				get_page(vmf->page);
4748 				pte_unmap_unlock(vmf->pte, vmf->ptl);
4749 				pgmap = page_pgmap(vmf->page);
4750 				ret = pgmap->ops->migrate_to_ram(vmf);
4751 				unlock_page(vmf->page);
4752 				put_page(vmf->page);
4753 			} else {
4754 				pte_unmap(vmf->pte);
4755 				softleaf_entry_wait_on_locked(entry, vmf->ptl);
4756 			}
4757 		} else if (softleaf_is_hwpoison(entry)) {
4758 			ret = VM_FAULT_HWPOISON;
4759 		} else if (softleaf_is_marker(entry)) {
4760 			ret = handle_pte_marker(vmf);
4761 		} else {
4762 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4763 			ret = VM_FAULT_SIGBUS;
4764 		}
4765 		goto out;
4766 	}
4767 
4768 	/* Prevent swapoff from happening to us. */
4769 	si = get_swap_device(entry);
4770 	if (unlikely(!si))
4771 		goto out;
4772 
4773 	folio = swap_cache_get_folio(entry);
4774 	if (folio)
4775 		swap_update_readahead(folio, vma, vmf->address);
4776 	if (!folio) {
4777 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
4778 			folio = alloc_swap_folio(vmf);
4779 			if (folio) {
4780 				/*
4781 				 * folio is charged, so swapin can only fail due
4782 				 * to raced swapin and return NULL.
4783 				 */
4784 				swapcache = swapin_folio(entry, folio);
4785 				if (swapcache != folio)
4786 					folio_put(folio);
4787 				folio = swapcache;
4788 			}
4789 		} else {
4790 			folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
4791 		}
4792 
4793 		if (!folio) {
4794 			/*
4795 			 * Back out if somebody else faulted in this pte
4796 			 * while we released the pte lock.
4797 			 */
4798 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4799 					vmf->address, &vmf->ptl);
4800 			if (likely(vmf->pte &&
4801 				   pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4802 				ret = VM_FAULT_OOM;
4803 			goto unlock;
4804 		}
4805 
4806 		/* Had to read the page from swap area: Major fault */
4807 		ret = VM_FAULT_MAJOR;
4808 		count_vm_event(PGMAJFAULT);
4809 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4810 	}
4811 
4812 	swapcache = folio;
4813 	ret |= folio_lock_or_retry(folio, vmf);
4814 	if (ret & VM_FAULT_RETRY)
4815 		goto out_release;
4816 
4817 	page = folio_file_page(folio, swp_offset(entry));
4818 	/*
4819 	 * Make sure folio_free_swap() or swapoff did not release the
4820 	 * swapcache from under us.  The page pin, and pte_same test
4821 	 * below, are not enough to exclude that.  Even if it is still
4822 	 * swapcache, we need to check that the page's swap has not
4823 	 * changed.
4824 	 */
4825 	if (unlikely(!folio_matches_swap_entry(folio, entry)))
4826 		goto out_page;
4827 
4828 	if (unlikely(PageHWPoison(page))) {
4829 		/*
4830 		 * hwpoisoned dirty swapcache pages are kept for killing
4831 		 * owner processes (which may be unknown at hwpoison time)
4832 		 */
4833 		ret = VM_FAULT_HWPOISON;
4834 		goto out_page;
4835 	}
4836 
4837 	/*
4838 	 * KSM sometimes has to copy on read faults, for example, if
4839 	 * folio->index of non-ksm folios would be nonlinear inside the
4840 	 * anon VMA -- the ksm flag is lost on actual swapout.
4841 	 */
4842 	folio = ksm_might_need_to_copy(folio, vma, vmf->address);
4843 	if (unlikely(!folio)) {
4844 		ret = VM_FAULT_OOM;
4845 		folio = swapcache;
4846 		goto out_page;
4847 	} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
4848 		ret = VM_FAULT_HWPOISON;
4849 		folio = swapcache;
4850 		goto out_page;
4851 	} else if (folio != swapcache)
4852 		page = folio_page(folio, 0);
4853 
4854 	/*
4855 	 * If we want to map a page that's in the swapcache writable, we
4856 	 * have to detect via the refcount if we're really the exclusive
4857 	 * owner. Try removing the extra reference from the local LRU
4858 	 * caches if required.
4859 	 */
4860 	if ((vmf->flags & FAULT_FLAG_WRITE) &&
4861 	    !folio_test_ksm(folio) && !folio_test_lru(folio))
4862 		lru_add_drain();
4863 
4864 	folio_throttle_swaprate(folio, GFP_KERNEL);
4865 
4866 	/*
4867 	 * Back out if somebody else already faulted in this pte.
4868 	 */
4869 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4870 			&vmf->ptl);
4871 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4872 		goto out_nomap;
4873 
4874 	if (unlikely(!folio_test_uptodate(folio))) {
4875 		ret = VM_FAULT_SIGBUS;
4876 		goto out_nomap;
4877 	}
4878 
4879 	nr_pages = 1;
4880 	page_idx = 0;
4881 	address = vmf->address;
4882 	ptep = vmf->pte;
4883 	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
4884 		int nr = folio_nr_pages(folio);
4885 		unsigned long idx = folio_page_idx(folio, page);
4886 		unsigned long folio_start = address - idx * PAGE_SIZE;
4887 		unsigned long folio_end = folio_start + nr * PAGE_SIZE;
4888 		pte_t *folio_ptep;
4889 		pte_t folio_pte;
4890 
4891 		if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
4892 			goto check_folio;
4893 		if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
4894 			goto check_folio;
4895 
4896 		folio_ptep = vmf->pte - idx;
4897 		folio_pte = ptep_get(folio_ptep);
4898 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4899 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4900 			goto check_folio;
4901 
4902 		page_idx = idx;
4903 		address = folio_start;
4904 		ptep = folio_ptep;
4905 		nr_pages = nr;
4906 		entry = folio->swap;
4907 		page = &folio->page;
4908 	}
4909 
4910 check_folio:
4911 	/*
4912 	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4913 	 * must never point at an anonymous page in the swapcache that is
4914 	 * PG_anon_exclusive. Sanity check that this holds and especially, that
4915 	 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4916 	 * check after taking the PT lock and making sure that nobody
4917 	 * concurrently faulted in this page and set PG_anon_exclusive.
4918 	 */
4919 	BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
4920 	BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
4921 
4922 	/*
4923 	 * If a large folio already belongs to anon mapping, then we
4924 	 * can just go on and map it partially.
4925 	 * If not, with the large swapin check above failing, the page table
4926 	 * have changed, so sub pages might got charged to the wrong cgroup,
4927 	 * or even should be shmem. So we have to free it and fallback.
4928 	 * Nothing should have touched it, both anon and shmem checks if a
4929 	 * large folio is fully appliable before use.
4930 	 *
4931 	 * This will be removed once we unify folio allocation in the swap cache
4932 	 * layer, where allocation of a folio stabilizes the swap entries.
4933 	 */
4934 	if (!folio_test_anon(folio) && folio_test_large(folio) &&
4935 	    nr_pages != folio_nr_pages(folio)) {
4936 		if (!WARN_ON_ONCE(folio_test_dirty(folio)))
4937 			swap_cache_del_folio(folio);
4938 		goto out_nomap;
4939 	}
4940 
4941 	/*
4942 	 * Check under PT lock (to protect against concurrent fork() sharing
4943 	 * the swap entry concurrently) for certainly exclusive pages.
4944 	 */
4945 	if (!folio_test_ksm(folio)) {
4946 		/*
4947 		 * The can_swapin_thp check above ensures all PTE have
4948 		 * same exclusiveness. Checking just one PTE is fine.
4949 		 */
4950 		exclusive = pte_swp_exclusive(vmf->orig_pte);
4951 		if (exclusive)
4952 			check_swap_exclusive(folio, entry, nr_pages);
4953 		if (folio != swapcache) {
4954 			/*
4955 			 * We have a fresh page that is not exposed to the
4956 			 * swapcache -> certainly exclusive.
4957 			 */
4958 			exclusive = true;
4959 		} else if (exclusive && folio_test_writeback(folio) &&
4960 			  data_race(si->flags & SWP_STABLE_WRITES)) {
4961 			/*
4962 			 * This is tricky: not all swap backends support
4963 			 * concurrent page modifications while under writeback.
4964 			 *
4965 			 * So if we stumble over such a page in the swapcache
4966 			 * we must not set the page exclusive, otherwise we can
4967 			 * map it writable without further checks and modify it
4968 			 * while still under writeback.
4969 			 *
4970 			 * For these problematic swap backends, simply drop the
4971 			 * exclusive marker: this is perfectly fine as we start
4972 			 * writeback only if we fully unmapped the page and
4973 			 * there are no unexpected references on the page after
4974 			 * unmapping succeeded. After fully unmapped, no
4975 			 * further GUP references (FOLL_GET and FOLL_PIN) can
4976 			 * appear, so dropping the exclusive marker and mapping
4977 			 * it only R/O is fine.
4978 			 */
4979 			exclusive = false;
4980 		}
4981 	}
4982 
4983 	/*
4984 	 * Some architectures may have to restore extra metadata to the page
4985 	 * when reading from swap. This metadata may be indexed by swap entry
4986 	 * so this must be called before folio_put_swap().
4987 	 */
4988 	arch_swap_restore(folio_swap(entry, folio), folio);
4989 
4990 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4991 	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
4992 	pte = mk_pte(page, vma->vm_page_prot);
4993 	if (pte_swp_soft_dirty(vmf->orig_pte))
4994 		pte = pte_mksoft_dirty(pte);
4995 	if (pte_swp_uffd_wp(vmf->orig_pte))
4996 		pte = pte_mkuffd_wp(pte);
4997 
4998 	/*
4999 	 * Same logic as in do_wp_page(); however, optimize for pages that are
5000 	 * certainly not shared either because we just allocated them without
5001 	 * exposing them to the swapcache or because the swap entry indicates
5002 	 * exclusivity.
5003 	 */
5004 	if (!folio_test_ksm(folio) &&
5005 	    (exclusive || folio_ref_count(folio) == 1)) {
5006 		if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
5007 		    !pte_needs_soft_dirty_wp(vma, pte)) {
5008 			pte = pte_mkwrite(pte, vma);
5009 			if (vmf->flags & FAULT_FLAG_WRITE) {
5010 				pte = pte_mkdirty(pte);
5011 				vmf->flags &= ~FAULT_FLAG_WRITE;
5012 			}
5013 		}
5014 		rmap_flags |= RMAP_EXCLUSIVE;
5015 	}
5016 	folio_ref_add(folio, nr_pages - 1);
5017 	flush_icache_pages(vma, page, nr_pages);
5018 	vmf->orig_pte = pte_advance_pfn(pte, page_idx);
5019 
5020 	/* ksm created a completely new copy */
5021 	if (unlikely(folio != swapcache)) {
5022 		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
5023 		folio_add_lru_vma(folio, vma);
5024 		folio_put_swap(swapcache, NULL);
5025 	} else if (!folio_test_anon(folio)) {
5026 		/*
5027 		 * We currently only expect !anon folios that are fully
5028 		 * mappable. See the comment after can_swapin_thp above.
5029 		 */
5030 		VM_WARN_ON_ONCE_FOLIO(folio_nr_pages(folio) != nr_pages, folio);
5031 		VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
5032 		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
5033 		folio_put_swap(folio, NULL);
5034 	} else {
5035 		VM_WARN_ON_ONCE(nr_pages != 1 && nr_pages != folio_nr_pages(folio));
5036 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
5037 					 rmap_flags);
5038 		folio_put_swap(folio, nr_pages == 1 ? page : NULL);
5039 	}
5040 
5041 	VM_BUG_ON(!folio_test_anon(folio) ||
5042 			(pte_write(pte) && !PageAnonExclusive(page)));
5043 	set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
5044 	arch_do_swap_page_nr(vma->vm_mm, vma, address,
5045 			pte, pte, nr_pages);
5046 
5047 	/*
5048 	 * Remove the swap entry and conditionally try to free up the swapcache.
5049 	 * Do it after mapping, so raced page faults will likely see the folio
5050 	 * in swap cache and wait on the folio lock.
5051 	 */
5052 	if (should_try_to_free_swap(si, folio, vma, nr_pages, vmf->flags))
5053 		folio_free_swap(folio);
5054 
5055 	folio_unlock(folio);
5056 	if (unlikely(folio != swapcache)) {
5057 		/*
5058 		 * Hold the lock to avoid the swap entry to be reused
5059 		 * until we take the PT lock for the pte_same() check
5060 		 * (to avoid false positives from pte_same). For
5061 		 * further safety release the lock after the folio_put_swap
5062 		 * so that the swap count won't change under a
5063 		 * parallel locked swapcache.
5064 		 */
5065 		folio_unlock(swapcache);
5066 		folio_put(swapcache);
5067 	}
5068 
5069 	if (vmf->flags & FAULT_FLAG_WRITE) {
5070 		ret |= do_wp_page(vmf);
5071 		if (ret & VM_FAULT_ERROR)
5072 			ret &= VM_FAULT_ERROR;
5073 		goto out;
5074 	}
5075 
5076 	/* No need to invalidate - it was non-present before */
5077 	update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
5078 unlock:
5079 	if (vmf->pte)
5080 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5081 out:
5082 	if (si)
5083 		put_swap_device(si);
5084 	return ret;
5085 out_nomap:
5086 	if (vmf->pte)
5087 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5088 out_page:
5089 	if (folio_test_swapcache(folio))
5090 		folio_free_swap(folio);
5091 	folio_unlock(folio);
5092 out_release:
5093 	folio_put(folio);
5094 	if (folio != swapcache) {
5095 		folio_unlock(swapcache);
5096 		folio_put(swapcache);
5097 	}
5098 	if (si)
5099 		put_swap_device(si);
5100 	return ret;
5101 }
5102 
5103 static bool pte_range_none(pte_t *pte, int nr_pages)
5104 {
5105 	int i;
5106 
5107 	for (i = 0; i < nr_pages; i++) {
5108 		if (!pte_none(ptep_get_lockless(pte + i)))
5109 			return false;
5110 	}
5111 
5112 	return true;
5113 }
5114 
5115 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
5116 {
5117 	struct vm_area_struct *vma = vmf->vma;
5118 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5119 	unsigned long orders;
5120 	struct folio *folio;
5121 	unsigned long addr;
5122 	pte_t *pte;
5123 	gfp_t gfp;
5124 	int order;
5125 
5126 	/*
5127 	 * If uffd is active for the vma we need per-page fault fidelity to
5128 	 * maintain the uffd semantics.
5129 	 */
5130 	if (unlikely(userfaultfd_armed(vma)))
5131 		goto fallback;
5132 
5133 	/*
5134 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
5135 	 * for this vma. Then filter out the orders that can't be allocated over
5136 	 * the faulting address and still be fully contained in the vma.
5137 	 */
5138 	orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT,
5139 					  BIT(PMD_ORDER) - 1);
5140 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
5141 
5142 	if (!orders)
5143 		goto fallback;
5144 
5145 	pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
5146 	if (!pte)
5147 		return ERR_PTR(-EAGAIN);
5148 
5149 	/*
5150 	 * Find the highest order where the aligned range is completely
5151 	 * pte_none(). Note that all remaining orders will be completely
5152 	 * pte_none().
5153 	 */
5154 	order = highest_order(orders);
5155 	while (orders) {
5156 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
5157 		if (pte_range_none(pte + pte_index(addr), 1 << order))
5158 			break;
5159 		order = next_order(&orders, order);
5160 	}
5161 
5162 	pte_unmap(pte);
5163 
5164 	if (!orders)
5165 		goto fallback;
5166 
5167 	/* Try allocating the highest of the remaining orders. */
5168 	gfp = vma_thp_gfp_mask(vma);
5169 	while (orders) {
5170 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
5171 		folio = vma_alloc_folio(gfp, order, vma, addr);
5172 		if (folio) {
5173 			if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
5174 				count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
5175 				folio_put(folio);
5176 				goto next;
5177 			}
5178 			folio_throttle_swaprate(folio, gfp);
5179 			/*
5180 			 * When a folio is not zeroed during allocation
5181 			 * (__GFP_ZERO not used) or user folios require special
5182 			 * handling, folio_zero_user() is used to make sure
5183 			 * that the page corresponding to the faulting address
5184 			 * will be hot in the cache after zeroing.
5185 			 */
5186 			if (user_alloc_needs_zeroing())
5187 				folio_zero_user(folio, vmf->address);
5188 			return folio;
5189 		}
5190 next:
5191 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
5192 		order = next_order(&orders, order);
5193 	}
5194 
5195 fallback:
5196 #endif
5197 	return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
5198 }
5199 
5200 void map_anon_folio_pte_nopf(struct folio *folio, pte_t *pte,
5201 		struct vm_area_struct *vma, unsigned long addr,
5202 		bool uffd_wp)
5203 {
5204 	const unsigned int nr_pages = folio_nr_pages(folio);
5205 	pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
5206 
5207 	entry = pte_sw_mkyoung(entry);
5208 
5209 	if (vma->vm_flags & VM_WRITE)
5210 		entry = pte_mkwrite(pte_mkdirty(entry), vma);
5211 	if (uffd_wp)
5212 		entry = pte_mkuffd_wp(entry);
5213 
5214 	folio_ref_add(folio, nr_pages - 1);
5215 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5216 	folio_add_lru_vma(folio, vma);
5217 	set_ptes(vma->vm_mm, addr, pte, entry, nr_pages);
5218 	update_mmu_cache_range(NULL, vma, addr, pte, nr_pages);
5219 }
5220 
5221 static void map_anon_folio_pte_pf(struct folio *folio, pte_t *pte,
5222 		struct vm_area_struct *vma, unsigned long addr, bool uffd_wp)
5223 {
5224 	const unsigned int order = folio_order(folio);
5225 
5226 	map_anon_folio_pte_nopf(folio, pte, vma, addr, uffd_wp);
5227 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1L << order);
5228 	count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC);
5229 }
5230 
5231 /*
5232  * We enter with non-exclusive mmap_lock (to exclude vma changes,
5233  * but allow concurrent faults), and pte mapped but not yet locked.
5234  * We return with mmap_lock still held, but pte unmapped and unlocked.
5235  */
5236 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
5237 {
5238 	struct vm_area_struct *vma = vmf->vma;
5239 	unsigned long addr = vmf->address;
5240 	struct folio *folio;
5241 	vm_fault_t ret = 0;
5242 	int nr_pages;
5243 	pte_t entry;
5244 
5245 	/* File mapping without ->vm_ops ? */
5246 	if (vma->vm_flags & VM_SHARED)
5247 		return VM_FAULT_SIGBUS;
5248 
5249 	/*
5250 	 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
5251 	 * be distinguished from a transient failure of pte_offset_map().
5252 	 */
5253 	if (pte_alloc(vma->vm_mm, vmf->pmd))
5254 		return VM_FAULT_OOM;
5255 
5256 	/* Use the zero-page for reads */
5257 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
5258 			!mm_forbids_zeropage(vma->vm_mm)) {
5259 		entry = pte_mkspecial(pfn_pte(zero_pfn(vmf->address),
5260 						vma->vm_page_prot));
5261 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5262 				vmf->address, &vmf->ptl);
5263 		if (!vmf->pte)
5264 			goto unlock;
5265 		if (vmf_pte_changed(vmf)) {
5266 			update_mmu_tlb(vma, vmf->address, vmf->pte);
5267 			goto unlock;
5268 		}
5269 		ret = check_stable_address_space(vma->vm_mm);
5270 		if (ret)
5271 			goto unlock;
5272 		/* Deliver the page fault to userland, check inside PT lock */
5273 		if (userfaultfd_missing(vma)) {
5274 			pte_unmap_unlock(vmf->pte, vmf->ptl);
5275 			return handle_userfault(vmf, VM_UFFD_MISSING);
5276 		}
5277 		if (vmf_orig_pte_uffd_wp(vmf))
5278 			entry = pte_mkuffd_wp(entry);
5279 		set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
5280 
5281 		/* No need to invalidate - it was non-present before */
5282 		update_mmu_cache(vma, addr, vmf->pte);
5283 		goto unlock;
5284 	}
5285 
5286 	/* Allocate our own private page. */
5287 	ret = vmf_anon_prepare(vmf);
5288 	if (ret)
5289 		return ret;
5290 	/* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
5291 	folio = alloc_anon_folio(vmf);
5292 	if (IS_ERR(folio))
5293 		return 0;
5294 	if (!folio)
5295 		goto oom;
5296 
5297 	nr_pages = folio_nr_pages(folio);
5298 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
5299 
5300 	/*
5301 	 * The memory barrier inside __folio_mark_uptodate makes sure that
5302 	 * preceding stores to the page contents become visible before
5303 	 * the set_pte_at() write.
5304 	 */
5305 	__folio_mark_uptodate(folio);
5306 
5307 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
5308 	if (!vmf->pte)
5309 		goto release;
5310 	if (nr_pages == 1 && vmf_pte_changed(vmf)) {
5311 		update_mmu_tlb(vma, addr, vmf->pte);
5312 		goto release;
5313 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5314 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
5315 		goto release;
5316 	}
5317 
5318 	ret = check_stable_address_space(vma->vm_mm);
5319 	if (ret)
5320 		goto release;
5321 
5322 	/* Deliver the page fault to userland, check inside PT lock */
5323 	if (userfaultfd_missing(vma)) {
5324 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5325 		folio_put(folio);
5326 		return handle_userfault(vmf, VM_UFFD_MISSING);
5327 	}
5328 	map_anon_folio_pte_pf(folio, vmf->pte, vma, addr,
5329 			      vmf_orig_pte_uffd_wp(vmf));
5330 unlock:
5331 	if (vmf->pte)
5332 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5333 	return ret;
5334 release:
5335 	folio_put(folio);
5336 	goto unlock;
5337 oom:
5338 	return VM_FAULT_OOM;
5339 }
5340 
5341 /*
5342  * The mmap_lock must have been held on entry, and may have been
5343  * released depending on flags and vma->vm_ops->fault() return value.
5344  * See filemap_fault() and __lock_page_retry().
5345  */
5346 static vm_fault_t __do_fault(struct vm_fault *vmf)
5347 {
5348 	struct vm_area_struct *vma = vmf->vma;
5349 	struct folio *folio;
5350 	vm_fault_t ret;
5351 
5352 	/*
5353 	 * Preallocate pte before we take page_lock because this might lead to
5354 	 * deadlocks for memcg reclaim which waits for pages under writeback:
5355 	 *				lock_page(A)
5356 	 *				SetPageWriteback(A)
5357 	 *				unlock_page(A)
5358 	 * lock_page(B)
5359 	 *				lock_page(B)
5360 	 * pte_alloc_one
5361 	 *   shrink_folio_list
5362 	 *     wait_on_page_writeback(A)
5363 	 *				SetPageWriteback(B)
5364 	 *				unlock_page(B)
5365 	 *				# flush A, B to clear the writeback
5366 	 */
5367 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
5368 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5369 		if (!vmf->prealloc_pte)
5370 			return VM_FAULT_OOM;
5371 	}
5372 
5373 	ret = vma->vm_ops->fault(vmf);
5374 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
5375 			    VM_FAULT_DONE_COW)))
5376 		return ret;
5377 
5378 	folio = page_folio(vmf->page);
5379 	if (unlikely(PageHWPoison(vmf->page))) {
5380 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
5381 		if (ret & VM_FAULT_LOCKED) {
5382 			if (page_mapped(vmf->page))
5383 				unmap_mapping_folio(folio);
5384 			/* Retry if a clean folio was removed from the cache. */
5385 			if (mapping_evict_folio(folio->mapping, folio))
5386 				poisonret = VM_FAULT_NOPAGE;
5387 			folio_unlock(folio);
5388 		}
5389 		folio_put(folio);
5390 		vmf->page = NULL;
5391 		return poisonret;
5392 	}
5393 
5394 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
5395 		folio_lock(folio);
5396 	else
5397 		VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
5398 
5399 	return ret;
5400 }
5401 
5402 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5403 static void deposit_prealloc_pte(struct vm_fault *vmf)
5404 {
5405 	struct vm_area_struct *vma = vmf->vma;
5406 
5407 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
5408 	/*
5409 	 * We are going to consume the prealloc table,
5410 	 * count that as nr_ptes.
5411 	 */
5412 	mm_inc_nr_ptes(vma->vm_mm);
5413 	vmf->prealloc_pte = NULL;
5414 }
5415 
5416 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
5417 {
5418 	struct vm_area_struct *vma = vmf->vma;
5419 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5420 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5421 	pmd_t entry;
5422 	vm_fault_t ret = VM_FAULT_FALLBACK;
5423 
5424 	/*
5425 	 * It is too late to allocate a small folio, we already have a large
5426 	 * folio in the pagecache: especially s390 KVM cannot tolerate any
5427 	 * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any
5428 	 * PMD mappings if THPs are disabled. As we already have a THP,
5429 	 * behave as if we are forcing a collapse.
5430 	 */
5431 	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags,
5432 						     /* forced_collapse=*/ true))
5433 		return ret;
5434 
5435 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
5436 		return ret;
5437 
5438 	if (!is_pmd_order(folio_order(folio)))
5439 		return ret;
5440 	page = &folio->page;
5441 
5442 	/*
5443 	 * Just backoff if any subpage of a THP is corrupted otherwise
5444 	 * the corrupted page may mapped by PMD silently to escape the
5445 	 * check.  This kind of THP just can be PTE mapped.  Access to
5446 	 * the corrupted subpage should trigger SIGBUS as expected.
5447 	 */
5448 	if (unlikely(folio_test_has_hwpoisoned(folio)))
5449 		return ret;
5450 
5451 	/*
5452 	 * Archs like ppc64 need additional space to store information
5453 	 * related to pte entry. Use the preallocated table for that.
5454 	 */
5455 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
5456 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5457 		if (!vmf->prealloc_pte)
5458 			return VM_FAULT_OOM;
5459 	}
5460 
5461 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
5462 	if (unlikely(!pmd_none(*vmf->pmd)))
5463 		goto out;
5464 
5465 	flush_icache_pages(vma, page, HPAGE_PMD_NR);
5466 
5467 	entry = folio_mk_pmd(folio, vma->vm_page_prot);
5468 	if (write)
5469 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
5470 
5471 	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
5472 	folio_add_file_rmap_pmd(folio, page, vma);
5473 
5474 	/*
5475 	 * deposit and withdraw with pmd lock held
5476 	 */
5477 	if (arch_needs_pgtable_deposit())
5478 		deposit_prealloc_pte(vmf);
5479 
5480 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
5481 
5482 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
5483 
5484 	/* fault is handled */
5485 	ret = 0;
5486 	count_vm_event(THP_FILE_MAPPED);
5487 out:
5488 	spin_unlock(vmf->ptl);
5489 	return ret;
5490 }
5491 #else
5492 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
5493 {
5494 	return VM_FAULT_FALLBACK;
5495 }
5496 #endif
5497 
5498 /**
5499  * set_pte_range - Set a range of PTEs to point to pages in a folio.
5500  * @vmf: Fault description.
5501  * @folio: The folio that contains @page.
5502  * @page: The first page to create a PTE for.
5503  * @nr: The number of PTEs to create.
5504  * @addr: The first address to create a PTE for.
5505  */
5506 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
5507 		struct page *page, unsigned int nr, unsigned long addr)
5508 {
5509 	struct vm_area_struct *vma = vmf->vma;
5510 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5511 	bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
5512 	pte_t entry;
5513 
5514 	flush_icache_pages(vma, page, nr);
5515 	entry = mk_pte(page, vma->vm_page_prot);
5516 
5517 	if (prefault && arch_wants_old_prefaulted_pte())
5518 		entry = pte_mkold(entry);
5519 	else
5520 		entry = pte_sw_mkyoung(entry);
5521 
5522 	if (write)
5523 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5524 	else if (pte_write(entry) && folio_test_dirty(folio))
5525 		entry = pte_mkdirty(entry);
5526 	if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
5527 		entry = pte_mkuffd_wp(entry);
5528 	/* copy-on-write page */
5529 	if (write && !(vma->vm_flags & VM_SHARED)) {
5530 		VM_BUG_ON_FOLIO(nr != 1, folio);
5531 		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5532 		folio_add_lru_vma(folio, vma);
5533 	} else {
5534 		folio_add_file_rmap_ptes(folio, page, nr, vma);
5535 	}
5536 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
5537 
5538 	/* no need to invalidate: a not-present page won't be cached */
5539 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
5540 }
5541 
5542 static bool vmf_pte_changed(struct vm_fault *vmf)
5543 {
5544 	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
5545 		return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
5546 
5547 	return !pte_none(ptep_get(vmf->pte));
5548 }
5549 
5550 /**
5551  * finish_fault - finish page fault once we have prepared the page to fault
5552  *
5553  * @vmf: structure describing the fault
5554  *
5555  * This function handles all that is needed to finish a page fault once the
5556  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5557  * given page, adds reverse page mapping, handles memcg charges and LRU
5558  * addition.
5559  *
5560  * The function expects the page to be locked and on success it consumes a
5561  * reference of a page being mapped (for the PTE which maps it).
5562  *
5563  * Return: %0 on success, %VM_FAULT_ code in case of error.
5564  */
5565 vm_fault_t finish_fault(struct vm_fault *vmf)
5566 {
5567 	struct vm_area_struct *vma = vmf->vma;
5568 	struct page *page;
5569 	struct folio *folio;
5570 	vm_fault_t ret;
5571 	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
5572 		      !(vma->vm_flags & VM_SHARED);
5573 	int type, nr_pages;
5574 	unsigned long addr;
5575 	bool needs_fallback = false;
5576 
5577 fallback:
5578 	addr = vmf->address;
5579 
5580 	/* Did we COW the page? */
5581 	if (is_cow)
5582 		page = vmf->cow_page;
5583 	else
5584 		page = vmf->page;
5585 
5586 	folio = page_folio(page);
5587 	/*
5588 	 * check even for read faults because we might have lost our CoWed
5589 	 * page
5590 	 */
5591 	if (!(vma->vm_flags & VM_SHARED)) {
5592 		ret = check_stable_address_space(vma->vm_mm);
5593 		if (ret)
5594 			return ret;
5595 	}
5596 
5597 	if (!needs_fallback && vma->vm_file) {
5598 		struct address_space *mapping = vma->vm_file->f_mapping;
5599 		pgoff_t file_end;
5600 
5601 		file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
5602 
5603 		/*
5604 		 * Do not allow to map with PTEs beyond i_size and with PMD
5605 		 * across i_size to preserve SIGBUS semantics.
5606 		 *
5607 		 * Make an exception for shmem/tmpfs that for long time
5608 		 * intentionally mapped with PMDs across i_size.
5609 		 */
5610 		needs_fallback = !shmem_mapping(mapping) &&
5611 			file_end < folio_next_index(folio);
5612 	}
5613 
5614 	if (pmd_none(*vmf->pmd)) {
5615 		if (!needs_fallback && folio_test_pmd_mappable(folio)) {
5616 			ret = do_set_pmd(vmf, folio, page);
5617 			if (ret != VM_FAULT_FALLBACK)
5618 				return ret;
5619 		}
5620 
5621 		if (vmf->prealloc_pte)
5622 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
5623 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
5624 			return VM_FAULT_OOM;
5625 	}
5626 
5627 	nr_pages = folio_nr_pages(folio);
5628 
5629 	/* Using per-page fault to maintain the uffd semantics */
5630 	if (unlikely(userfaultfd_armed(vma)) || unlikely(needs_fallback)) {
5631 		nr_pages = 1;
5632 	} else if (nr_pages > 1) {
5633 		pgoff_t idx = folio_page_idx(folio, page);
5634 		/* The page offset of vmf->address within the VMA. */
5635 		pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5636 		/* The index of the entry in the pagetable for fault page. */
5637 		pgoff_t pte_off = pte_index(vmf->address);
5638 
5639 		/*
5640 		 * Fallback to per-page fault in case the folio size in page
5641 		 * cache beyond the VMA limits and PMD pagetable limits.
5642 		 */
5643 		if (unlikely(vma_off < idx ||
5644 			    vma_off + (nr_pages - idx) > vma_pages(vma) ||
5645 			    pte_off < idx ||
5646 			    pte_off + (nr_pages - idx)  > PTRS_PER_PTE)) {
5647 			nr_pages = 1;
5648 		} else {
5649 			/* Now we can set mappings for the whole large folio. */
5650 			addr = vmf->address - idx * PAGE_SIZE;
5651 			page = &folio->page;
5652 		}
5653 	}
5654 
5655 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5656 				       addr, &vmf->ptl);
5657 	if (!vmf->pte)
5658 		return VM_FAULT_NOPAGE;
5659 
5660 	/* Re-check under ptl */
5661 	if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
5662 		update_mmu_tlb(vma, addr, vmf->pte);
5663 		ret = VM_FAULT_NOPAGE;
5664 		goto unlock;
5665 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5666 		needs_fallback = true;
5667 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5668 		goto fallback;
5669 	}
5670 
5671 	folio_ref_add(folio, nr_pages - 1);
5672 	set_pte_range(vmf, folio, page, nr_pages, addr);
5673 	type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
5674 	add_mm_counter(vma->vm_mm, type, nr_pages);
5675 	ret = 0;
5676 
5677 unlock:
5678 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5679 	return ret;
5680 }
5681 
5682 static unsigned long fault_around_pages __read_mostly =
5683 	65536 >> PAGE_SHIFT;
5684 
5685 #ifdef CONFIG_DEBUG_FS
5686 static int fault_around_bytes_get(void *data, u64 *val)
5687 {
5688 	*val = fault_around_pages << PAGE_SHIFT;
5689 	return 0;
5690 }
5691 
5692 /*
5693  * fault_around_bytes must be rounded down to the nearest page order as it's
5694  * what do_fault_around() expects to see.
5695  */
5696 static int fault_around_bytes_set(void *data, u64 val)
5697 {
5698 	if (val / PAGE_SIZE > PTRS_PER_PTE)
5699 		return -EINVAL;
5700 
5701 	/*
5702 	 * The minimum value is 1 page, however this results in no fault-around
5703 	 * at all. See should_fault_around().
5704 	 */
5705 	val = max(val, PAGE_SIZE);
5706 	fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
5707 
5708 	return 0;
5709 }
5710 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5711 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5712 
5713 static int __init fault_around_debugfs(void)
5714 {
5715 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
5716 				   &fault_around_bytes_fops);
5717 	return 0;
5718 }
5719 late_initcall(fault_around_debugfs);
5720 #endif
5721 
5722 /*
5723  * do_fault_around() tries to map few pages around the fault address. The hope
5724  * is that the pages will be needed soon and this will lower the number of
5725  * faults to handle.
5726  *
5727  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5728  * not ready to be mapped: not up-to-date, locked, etc.
5729  *
5730  * This function doesn't cross VMA or page table boundaries, in order to call
5731  * map_pages() and acquire a PTE lock only once.
5732  *
5733  * fault_around_pages defines how many pages we'll try to map.
5734  * do_fault_around() expects it to be set to a power of two less than or equal
5735  * to PTRS_PER_PTE.
5736  *
5737  * The virtual address of the area that we map is naturally aligned to
5738  * fault_around_pages * PAGE_SIZE rounded down to the machine page size
5739  * (and therefore to page order).  This way it's easier to guarantee
5740  * that we don't cross page table boundaries.
5741  */
5742 static vm_fault_t do_fault_around(struct vm_fault *vmf)
5743 {
5744 	pgoff_t nr_pages = READ_ONCE(fault_around_pages);
5745 	pgoff_t pte_off = pte_index(vmf->address);
5746 	/* The page offset of vmf->address within the VMA. */
5747 	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5748 	pgoff_t from_pte, to_pte;
5749 	vm_fault_t ret;
5750 
5751 	/* The PTE offset of the start address, clamped to the VMA. */
5752 	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
5753 		       pte_off - min(pte_off, vma_off));
5754 
5755 	/* The PTE offset of the end address, clamped to the VMA and PTE. */
5756 	to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
5757 		      pte_off + vma_pages(vmf->vma) - vma_off) - 1;
5758 
5759 	if (pmd_none(*vmf->pmd)) {
5760 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
5761 		if (!vmf->prealloc_pte)
5762 			return VM_FAULT_OOM;
5763 	}
5764 
5765 	rcu_read_lock();
5766 	ret = vmf->vma->vm_ops->map_pages(vmf,
5767 			vmf->pgoff + from_pte - pte_off,
5768 			vmf->pgoff + to_pte - pte_off);
5769 	rcu_read_unlock();
5770 
5771 	return ret;
5772 }
5773 
5774 /* Return true if we should do read fault-around, false otherwise */
5775 static inline bool should_fault_around(struct vm_fault *vmf)
5776 {
5777 	/* No ->map_pages?  No way to fault around... */
5778 	if (!vmf->vma->vm_ops->map_pages)
5779 		return false;
5780 
5781 	if (uffd_disable_fault_around(vmf->vma))
5782 		return false;
5783 
5784 	/* A single page implies no faulting 'around' at all. */
5785 	return fault_around_pages > 1;
5786 }
5787 
5788 static vm_fault_t do_read_fault(struct vm_fault *vmf)
5789 {
5790 	vm_fault_t ret = 0;
5791 	struct folio *folio;
5792 
5793 	/*
5794 	 * Let's call ->map_pages() first and use ->fault() as fallback
5795 	 * if page by the offset is not ready to be mapped (cold cache or
5796 	 * something).
5797 	 */
5798 	if (should_fault_around(vmf)) {
5799 		ret = do_fault_around(vmf);
5800 		if (ret)
5801 			return ret;
5802 	}
5803 
5804 	ret = vmf_can_call_fault(vmf);
5805 	if (ret)
5806 		return ret;
5807 
5808 	ret = __do_fault(vmf);
5809 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5810 		return ret;
5811 
5812 	ret |= finish_fault(vmf);
5813 	folio = page_folio(vmf->page);
5814 	folio_unlock(folio);
5815 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5816 		folio_put(folio);
5817 	return ret;
5818 }
5819 
5820 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
5821 {
5822 	struct vm_area_struct *vma = vmf->vma;
5823 	struct folio *folio;
5824 	vm_fault_t ret;
5825 
5826 	ret = vmf_can_call_fault(vmf);
5827 	if (!ret)
5828 		ret = vmf_anon_prepare(vmf);
5829 	if (ret)
5830 		return ret;
5831 
5832 	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
5833 	if (!folio)
5834 		return VM_FAULT_OOM;
5835 
5836 	vmf->cow_page = &folio->page;
5837 
5838 	ret = __do_fault(vmf);
5839 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5840 		goto uncharge_out;
5841 	if (ret & VM_FAULT_DONE_COW)
5842 		return ret;
5843 
5844 	if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
5845 		ret = VM_FAULT_HWPOISON;
5846 		goto unlock;
5847 	}
5848 	__folio_mark_uptodate(folio);
5849 
5850 	ret |= finish_fault(vmf);
5851 unlock:
5852 	unlock_page(vmf->page);
5853 	put_page(vmf->page);
5854 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5855 		goto uncharge_out;
5856 	return ret;
5857 uncharge_out:
5858 	folio_put(folio);
5859 	return ret;
5860 }
5861 
5862 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5863 {
5864 	struct vm_area_struct *vma = vmf->vma;
5865 	vm_fault_t ret, tmp;
5866 	struct folio *folio;
5867 
5868 	ret = vmf_can_call_fault(vmf);
5869 	if (ret)
5870 		return ret;
5871 
5872 	ret = __do_fault(vmf);
5873 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5874 		return ret;
5875 
5876 	folio = page_folio(vmf->page);
5877 
5878 	/*
5879 	 * Check if the backing address space wants to know that the page is
5880 	 * about to become writable
5881 	 */
5882 	if (vma->vm_ops->page_mkwrite) {
5883 		folio_unlock(folio);
5884 		tmp = do_page_mkwrite(vmf, folio);
5885 		if (unlikely(!tmp ||
5886 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5887 			folio_put(folio);
5888 			return tmp;
5889 		}
5890 	}
5891 
5892 	ret |= finish_fault(vmf);
5893 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5894 					VM_FAULT_RETRY))) {
5895 		folio_unlock(folio);
5896 		folio_put(folio);
5897 		return ret;
5898 	}
5899 
5900 	ret |= fault_dirty_shared_page(vmf);
5901 	return ret;
5902 }
5903 
5904 /*
5905  * We enter with non-exclusive mmap_lock (to exclude vma changes,
5906  * but allow concurrent faults).
5907  * The mmap_lock may have been released depending on flags and our
5908  * return value.  See filemap_fault() and __folio_lock_or_retry().
5909  * If mmap_lock is released, vma may become invalid (for example
5910  * by other thread calling munmap()).
5911  */
5912 static vm_fault_t do_fault(struct vm_fault *vmf)
5913 {
5914 	struct vm_area_struct *vma = vmf->vma;
5915 	struct mm_struct *vm_mm = vma->vm_mm;
5916 	vm_fault_t ret;
5917 
5918 	/*
5919 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
5920 	 */
5921 	if (!vma->vm_ops->fault) {
5922 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
5923 					       vmf->address, &vmf->ptl);
5924 		if (unlikely(!vmf->pte))
5925 			ret = VM_FAULT_SIGBUS;
5926 		else {
5927 			/*
5928 			 * Make sure this is not a temporary clearing of pte
5929 			 * by holding ptl and checking again. A R/M/W update
5930 			 * of pte involves: take ptl, clearing the pte so that
5931 			 * we don't have concurrent modification by hardware
5932 			 * followed by an update.
5933 			 */
5934 			if (unlikely(pte_none(ptep_get(vmf->pte))))
5935 				ret = VM_FAULT_SIGBUS;
5936 			else
5937 				ret = VM_FAULT_NOPAGE;
5938 
5939 			pte_unmap_unlock(vmf->pte, vmf->ptl);
5940 		}
5941 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
5942 		ret = do_read_fault(vmf);
5943 	else if (!(vma->vm_flags & VM_SHARED))
5944 		ret = do_cow_fault(vmf);
5945 	else
5946 		ret = do_shared_fault(vmf);
5947 
5948 	/* preallocated pagetable is unused: free it */
5949 	if (vmf->prealloc_pte) {
5950 		pte_free(vm_mm, vmf->prealloc_pte);
5951 		vmf->prealloc_pte = NULL;
5952 	}
5953 	return ret;
5954 }
5955 
5956 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
5957 		      unsigned long addr, int *flags,
5958 		      bool writable, int *last_cpupid)
5959 {
5960 	struct vm_area_struct *vma = vmf->vma;
5961 
5962 	/*
5963 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5964 	 * much anyway since they can be in shared cache state. This misses
5965 	 * the case where a mapping is writable but the process never writes
5966 	 * to it but pte_write gets cleared during protection updates and
5967 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
5968 	 * background writeback, dirty balancing and application behaviour.
5969 	 */
5970 	if (!writable)
5971 		*flags |= TNF_NO_GROUP;
5972 
5973 	/*
5974 	 * Flag if the folio is shared between multiple address spaces. This
5975 	 * is later used when determining whether to group tasks together
5976 	 */
5977 	if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
5978 		*flags |= TNF_SHARED;
5979 	/*
5980 	 * For memory tiering mode, cpupid of slow memory page is used
5981 	 * to record page access time.  So use default value.
5982 	 */
5983 	if (folio_use_access_time(folio))
5984 		*last_cpupid = (-1 & LAST_CPUPID_MASK);
5985 	else
5986 		*last_cpupid = folio_last_cpupid(folio);
5987 
5988 	/* Record the current PID accessing VMA */
5989 	vma_set_access_pid_bit(vma);
5990 
5991 	count_vm_numa_event(NUMA_HINT_FAULTS);
5992 #ifdef CONFIG_NUMA_BALANCING
5993 	count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
5994 #endif
5995 	if (folio_nid(folio) == numa_node_id()) {
5996 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5997 		*flags |= TNF_FAULT_LOCAL;
5998 	}
5999 
6000 	return mpol_misplaced(folio, vmf, addr);
6001 }
6002 
6003 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
6004 					unsigned long fault_addr, pte_t *fault_pte,
6005 					bool writable)
6006 {
6007 	pte_t pte, old_pte;
6008 
6009 	old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
6010 	pte = pte_modify(old_pte, vma->vm_page_prot);
6011 	pte = pte_mkyoung(pte);
6012 	if (writable)
6013 		pte = pte_mkwrite(pte, vma);
6014 	ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
6015 	update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
6016 }
6017 
6018 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
6019 				       struct folio *folio, pte_t fault_pte,
6020 				       bool ignore_writable, bool pte_write_upgrade)
6021 {
6022 	int nr = pte_pfn(fault_pte) - folio_pfn(folio);
6023 	unsigned long start, end, addr = vmf->address;
6024 	unsigned long addr_start = addr - (nr << PAGE_SHIFT);
6025 	unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
6026 	pte_t *start_ptep;
6027 
6028 	/* Stay within the VMA and within the page table. */
6029 	start = max3(addr_start, pt_start, vma->vm_start);
6030 	end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
6031 		   vma->vm_end);
6032 	start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
6033 
6034 	/* Restore all PTEs' mapping of the large folio */
6035 	for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
6036 		pte_t ptent = ptep_get(start_ptep);
6037 		bool writable = false;
6038 
6039 		if (!pte_present(ptent) || !pte_protnone(ptent))
6040 			continue;
6041 
6042 		if (pfn_folio(pte_pfn(ptent)) != folio)
6043 			continue;
6044 
6045 		if (!ignore_writable) {
6046 			ptent = pte_modify(ptent, vma->vm_page_prot);
6047 			writable = pte_write(ptent);
6048 			if (!writable && pte_write_upgrade &&
6049 			    can_change_pte_writable(vma, addr, ptent))
6050 				writable = true;
6051 		}
6052 
6053 		numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
6054 	}
6055 }
6056 
6057 static vm_fault_t do_numa_page(struct vm_fault *vmf)
6058 {
6059 	struct vm_area_struct *vma = vmf->vma;
6060 	struct folio *folio = NULL;
6061 	int nid = NUMA_NO_NODE;
6062 	bool writable = false, ignore_writable = false;
6063 	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
6064 	int last_cpupid;
6065 	int target_nid;
6066 	pte_t pte, old_pte;
6067 	int flags = 0, nr_pages;
6068 
6069 	/*
6070 	 * The pte cannot be used safely until we verify, while holding the page
6071 	 * table lock, that its contents have not changed during fault handling.
6072 	 */
6073 	spin_lock(vmf->ptl);
6074 	/* Read the live PTE from the page tables: */
6075 	old_pte = ptep_get(vmf->pte);
6076 
6077 	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
6078 		pte_unmap_unlock(vmf->pte, vmf->ptl);
6079 		return 0;
6080 	}
6081 
6082 	pte = pte_modify(old_pte, vma->vm_page_prot);
6083 
6084 	/*
6085 	 * Detect now whether the PTE could be writable; this information
6086 	 * is only valid while holding the PT lock.
6087 	 */
6088 	writable = pte_write(pte);
6089 	if (!writable && pte_write_upgrade &&
6090 	    can_change_pte_writable(vma, vmf->address, pte))
6091 		writable = true;
6092 
6093 	folio = vm_normal_folio(vma, vmf->address, pte);
6094 	if (!folio || folio_is_zone_device(folio))
6095 		goto out_map;
6096 
6097 	nid = folio_nid(folio);
6098 	nr_pages = folio_nr_pages(folio);
6099 
6100 	target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
6101 					writable, &last_cpupid);
6102 	if (target_nid == NUMA_NO_NODE)
6103 		goto out_map;
6104 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
6105 		flags |= TNF_MIGRATE_FAIL;
6106 		goto out_map;
6107 	}
6108 	/* The folio is isolated and isolation code holds a folio reference. */
6109 	pte_unmap_unlock(vmf->pte, vmf->ptl);
6110 	writable = false;
6111 	ignore_writable = true;
6112 
6113 	/* Migrate to the requested node */
6114 	if (!migrate_misplaced_folio(folio, target_nid)) {
6115 		nid = target_nid;
6116 		flags |= TNF_MIGRATED;
6117 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
6118 		return 0;
6119 	}
6120 
6121 	flags |= TNF_MIGRATE_FAIL;
6122 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
6123 				       vmf->address, &vmf->ptl);
6124 	if (unlikely(!vmf->pte))
6125 		return 0;
6126 	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
6127 		pte_unmap_unlock(vmf->pte, vmf->ptl);
6128 		return 0;
6129 	}
6130 out_map:
6131 	/*
6132 	 * Make it present again, depending on how arch implements
6133 	 * non-accessible ptes, some can allow access by kernel mode.
6134 	 */
6135 	if (folio && folio_test_large(folio))
6136 		numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
6137 					   pte_write_upgrade);
6138 	else
6139 		numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
6140 					    writable);
6141 	pte_unmap_unlock(vmf->pte, vmf->ptl);
6142 
6143 	if (nid != NUMA_NO_NODE)
6144 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
6145 	return 0;
6146 }
6147 
6148 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
6149 {
6150 	struct vm_area_struct *vma = vmf->vma;
6151 	if (vma_is_anonymous(vma))
6152 		return do_huge_pmd_anonymous_page(vmf);
6153 	if (vma->vm_ops->huge_fault)
6154 		return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
6155 	return VM_FAULT_FALLBACK;
6156 }
6157 
6158 /* `inline' is required to avoid gcc 4.1.2 build error */
6159 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
6160 {
6161 	struct vm_area_struct *vma = vmf->vma;
6162 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
6163 	vm_fault_t ret;
6164 
6165 	if (vma_is_anonymous(vma)) {
6166 		if (likely(!unshare) &&
6167 		    userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
6168 			if (userfaultfd_wp_async(vmf->vma))
6169 				goto split;
6170 			return handle_userfault(vmf, VM_UFFD_WP);
6171 		}
6172 		return do_huge_pmd_wp_page(vmf);
6173 	}
6174 
6175 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
6176 		if (vma->vm_ops->huge_fault) {
6177 			ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
6178 			if (!(ret & VM_FAULT_FALLBACK))
6179 				return ret;
6180 		}
6181 	}
6182 
6183 split:
6184 	/* COW or write-notify handled on pte level: split pmd. */
6185 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false);
6186 
6187 	return VM_FAULT_FALLBACK;
6188 }
6189 
6190 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
6191 {
6192 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
6193 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
6194 	struct vm_area_struct *vma = vmf->vma;
6195 	/* No support for anonymous transparent PUD pages yet */
6196 	if (vma_is_anonymous(vma))
6197 		return VM_FAULT_FALLBACK;
6198 	if (vma->vm_ops->huge_fault)
6199 		return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
6200 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
6201 	return VM_FAULT_FALLBACK;
6202 }
6203 
6204 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
6205 {
6206 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
6207 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
6208 	struct vm_area_struct *vma = vmf->vma;
6209 	vm_fault_t ret;
6210 
6211 	/* No support for anonymous transparent PUD pages yet */
6212 	if (vma_is_anonymous(vma))
6213 		goto split;
6214 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
6215 		if (vma->vm_ops->huge_fault) {
6216 			ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
6217 			if (!(ret & VM_FAULT_FALLBACK))
6218 				return ret;
6219 		}
6220 	}
6221 split:
6222 	/* COW or write-notify not handled on PUD level: split pud.*/
6223 	__split_huge_pud(vma, vmf->pud, vmf->address);
6224 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
6225 	return VM_FAULT_FALLBACK;
6226 }
6227 
6228 /*
6229  * The page faults may be spurious because of the racy access to the
6230  * page table.  For example, a non-populated virtual page is accessed
6231  * on 2 CPUs simultaneously, thus the page faults are triggered on
6232  * both CPUs.  However, it's possible that one CPU (say CPU A) cannot
6233  * find the reason for the page fault if the other CPU (say CPU B) has
6234  * changed the page table before the PTE is checked on CPU A.  Most of
6235  * the time, the spurious page faults can be ignored safely.  However,
6236  * if the page fault is for the write access, it's possible that a
6237  * stale read-only TLB entry exists in the local CPU and needs to be
6238  * flushed on some architectures.  This is called the spurious page
6239  * fault fixing.
6240  *
6241  * Note: flush_tlb_fix_spurious_fault() is defined as flush_tlb_page()
6242  * by default and used as such on most architectures, while
6243  * flush_tlb_fix_spurious_fault_pmd() is defined as NOP by default and
6244  * used as such on most architectures.
6245  */
6246 static void fix_spurious_fault(struct vm_fault *vmf,
6247 			       enum pgtable_level ptlevel)
6248 {
6249 	/* Skip spurious TLB flush for retried page fault */
6250 	if (vmf->flags & FAULT_FLAG_TRIED)
6251 		return;
6252 	/*
6253 	 * This is needed only for protection faults but the arch code
6254 	 * is not yet telling us if this is a protection fault or not.
6255 	 * This still avoids useless tlb flushes for .text page faults
6256 	 * with threads.
6257 	 */
6258 	if (vmf->flags & FAULT_FLAG_WRITE) {
6259 		if (ptlevel == PGTABLE_LEVEL_PTE)
6260 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
6261 						     vmf->pte);
6262 		else
6263 			flush_tlb_fix_spurious_fault_pmd(vmf->vma, vmf->address,
6264 							 vmf->pmd);
6265 	}
6266 }
6267 /*
6268  * These routines also need to handle stuff like marking pages dirty
6269  * and/or accessed for architectures that don't do it in hardware (most
6270  * RISC architectures).  The early dirtying is also good on the i386.
6271  *
6272  * There is also a hook called "update_mmu_cache()" that architectures
6273  * with external mmu caches can use to update those (ie the Sparc or
6274  * PowerPC hashed page tables that act as extended TLBs).
6275  *
6276  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
6277  * concurrent faults).
6278  *
6279  * The mmap_lock may have been released depending on flags and our return value.
6280  * See filemap_fault() and __folio_lock_or_retry().
6281  */
6282 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
6283 {
6284 	pte_t entry;
6285 
6286 	if (unlikely(pmd_none(*vmf->pmd))) {
6287 		/*
6288 		 * Leave __pte_alloc() until later: because vm_ops->fault may
6289 		 * want to allocate huge page, and if we expose page table
6290 		 * for an instant, it will be difficult to retract from
6291 		 * concurrent faults and from rmap lookups.
6292 		 */
6293 		vmf->pte = NULL;
6294 		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
6295 	} else {
6296 		pmd_t dummy_pmdval;
6297 
6298 		/*
6299 		 * A regular pmd is established and it can't morph into a huge
6300 		 * pmd by anon khugepaged, since that takes mmap_lock in write
6301 		 * mode; but shmem or file collapse to THP could still morph
6302 		 * it into a huge pmd: just retry later if so.
6303 		 *
6304 		 * Use the maywrite version to indicate that vmf->pte may be
6305 		 * modified, but since we will use pte_same() to detect the
6306 		 * change of the !pte_none() entry, there is no need to recheck
6307 		 * the pmdval. Here we choose to pass a dummy variable instead
6308 		 * of NULL, which helps new user think about why this place is
6309 		 * special.
6310 		 */
6311 		vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
6312 						    vmf->address, &dummy_pmdval,
6313 						    &vmf->ptl);
6314 		if (unlikely(!vmf->pte))
6315 			return 0;
6316 		vmf->orig_pte = ptep_get_lockless(vmf->pte);
6317 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
6318 
6319 		if (pte_none(vmf->orig_pte)) {
6320 			pte_unmap(vmf->pte);
6321 			vmf->pte = NULL;
6322 		}
6323 	}
6324 
6325 	if (!vmf->pte)
6326 		return do_pte_missing(vmf);
6327 
6328 	if (!pte_present(vmf->orig_pte))
6329 		return do_swap_page(vmf);
6330 
6331 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
6332 		return do_numa_page(vmf);
6333 
6334 	spin_lock(vmf->ptl);
6335 	entry = vmf->orig_pte;
6336 	if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
6337 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
6338 		goto unlock;
6339 	}
6340 	if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6341 		if (!pte_write(entry))
6342 			return do_wp_page(vmf);
6343 		else if (likely(vmf->flags & FAULT_FLAG_WRITE))
6344 			entry = pte_mkdirty(entry);
6345 	}
6346 	entry = pte_mkyoung(entry);
6347 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
6348 				vmf->flags & FAULT_FLAG_WRITE))
6349 		update_mmu_cache_range(vmf, vmf->vma, vmf->address,
6350 				vmf->pte, 1);
6351 	else
6352 		fix_spurious_fault(vmf, PGTABLE_LEVEL_PTE);
6353 unlock:
6354 	pte_unmap_unlock(vmf->pte, vmf->ptl);
6355 	return 0;
6356 }
6357 
6358 /*
6359  * On entry, we hold either the VMA lock or the mmap_lock
6360  * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in
6361  * the result, the mmap_lock is not held on exit.  See filemap_fault()
6362  * and __folio_lock_or_retry().
6363  */
6364 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
6365 		unsigned long address, unsigned int flags)
6366 {
6367 	struct vm_fault vmf = {
6368 		.vma = vma,
6369 		.address = address & PAGE_MASK,
6370 		.real_address = address,
6371 		.flags = flags,
6372 		.pgoff = linear_page_index(vma, address),
6373 		.gfp_mask = __get_fault_gfp_mask(vma),
6374 	};
6375 	struct mm_struct *mm = vma->vm_mm;
6376 	vm_flags_t vm_flags = vma->vm_flags;
6377 	pgd_t *pgd;
6378 	p4d_t *p4d;
6379 	vm_fault_t ret;
6380 
6381 	pgd = pgd_offset(mm, address);
6382 	p4d = p4d_alloc(mm, pgd, address);
6383 	if (!p4d)
6384 		return VM_FAULT_OOM;
6385 
6386 	vmf.pud = pud_alloc(mm, p4d, address);
6387 	if (!vmf.pud)
6388 		return VM_FAULT_OOM;
6389 retry_pud:
6390 	if (pud_none(*vmf.pud) &&
6391 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PUD_ORDER)) {
6392 		ret = create_huge_pud(&vmf);
6393 		if (!(ret & VM_FAULT_FALLBACK))
6394 			return ret;
6395 	} else {
6396 		pud_t orig_pud = *vmf.pud;
6397 
6398 		barrier();
6399 		if (pud_trans_huge(orig_pud)) {
6400 
6401 			/*
6402 			 * TODO once we support anonymous PUDs: NUMA case and
6403 			 * FAULT_FLAG_UNSHARE handling.
6404 			 */
6405 			if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
6406 				ret = wp_huge_pud(&vmf, orig_pud);
6407 				if (!(ret & VM_FAULT_FALLBACK))
6408 					return ret;
6409 			} else {
6410 				huge_pud_set_accessed(&vmf, orig_pud);
6411 				return 0;
6412 			}
6413 		}
6414 	}
6415 
6416 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
6417 	if (!vmf.pmd)
6418 		return VM_FAULT_OOM;
6419 
6420 	/* Huge pud page fault raced with pmd_alloc? */
6421 	if (pud_trans_unstable(vmf.pud))
6422 		goto retry_pud;
6423 
6424 	if (pmd_none(*vmf.pmd) &&
6425 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
6426 		ret = create_huge_pmd(&vmf);
6427 		if (ret & VM_FAULT_FALLBACK)
6428 			goto fallback;
6429 		else
6430 			return ret;
6431 	}
6432 
6433 	vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
6434 	if (pmd_none(vmf.orig_pmd))
6435 		goto fallback;
6436 
6437 	if (unlikely(!pmd_present(vmf.orig_pmd))) {
6438 		if (pmd_is_device_private_entry(vmf.orig_pmd))
6439 			return do_huge_pmd_device_private(&vmf);
6440 
6441 		if (pmd_is_migration_entry(vmf.orig_pmd))
6442 			pmd_migration_entry_wait(mm, vmf.pmd);
6443 		return 0;
6444 	}
6445 	if (pmd_trans_huge(vmf.orig_pmd)) {
6446 		if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
6447 			return do_huge_pmd_numa_page(&vmf);
6448 
6449 		if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6450 		    !pmd_write(vmf.orig_pmd)) {
6451 			ret = wp_huge_pmd(&vmf);
6452 			if (!(ret & VM_FAULT_FALLBACK))
6453 				return ret;
6454 		} else {
6455 			vmf.ptl = pmd_lock(mm, vmf.pmd);
6456 			if (!huge_pmd_set_accessed(&vmf))
6457 				fix_spurious_fault(&vmf, PGTABLE_LEVEL_PMD);
6458 			spin_unlock(vmf.ptl);
6459 			return 0;
6460 		}
6461 	}
6462 
6463 fallback:
6464 	return handle_pte_fault(&vmf);
6465 }
6466 
6467 /**
6468  * mm_account_fault - Do page fault accounting
6469  * @mm: mm from which memcg should be extracted. It can be NULL.
6470  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
6471  *        of perf event counters, but we'll still do the per-task accounting to
6472  *        the task who triggered this page fault.
6473  * @address: the faulted address.
6474  * @flags: the fault flags.
6475  * @ret: the fault retcode.
6476  *
6477  * This will take care of most of the page fault accounting.  Meanwhile, it
6478  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
6479  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
6480  * still be in per-arch page fault handlers at the entry of page fault.
6481  */
6482 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
6483 				    unsigned long address, unsigned int flags,
6484 				    vm_fault_t ret)
6485 {
6486 	bool major;
6487 
6488 	/* Incomplete faults will be accounted upon completion. */
6489 	if (ret & VM_FAULT_RETRY)
6490 		return;
6491 
6492 	/*
6493 	 * To preserve the behavior of older kernels, PGFAULT counters record
6494 	 * both successful and failed faults, as opposed to perf counters,
6495 	 * which ignore failed cases.
6496 	 */
6497 	count_vm_event(PGFAULT);
6498 	count_memcg_event_mm(mm, PGFAULT);
6499 
6500 	/*
6501 	 * Do not account for unsuccessful faults (e.g. when the address wasn't
6502 	 * valid).  That includes arch_vma_access_permitted() failing before
6503 	 * reaching here. So this is not a "this many hardware page faults"
6504 	 * counter.  We should use the hw profiling for that.
6505 	 */
6506 	if (ret & VM_FAULT_ERROR)
6507 		return;
6508 
6509 	/*
6510 	 * We define the fault as a major fault when the final successful fault
6511 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
6512 	 * handle it immediately previously).
6513 	 */
6514 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
6515 
6516 	if (major)
6517 		current->maj_flt++;
6518 	else
6519 		current->min_flt++;
6520 
6521 	/*
6522 	 * If the fault is done for GUP, regs will be NULL.  We only do the
6523 	 * accounting for the per thread fault counters who triggered the
6524 	 * fault, and we skip the perf event updates.
6525 	 */
6526 	if (!regs)
6527 		return;
6528 
6529 	if (major)
6530 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
6531 	else
6532 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
6533 }
6534 
6535 #ifdef CONFIG_LRU_GEN
6536 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6537 {
6538 	/* the LRU algorithm only applies to accesses with recency */
6539 	current->in_lru_fault = vma_has_recency(vma);
6540 }
6541 
6542 static void lru_gen_exit_fault(void)
6543 {
6544 	current->in_lru_fault = false;
6545 }
6546 #else
6547 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6548 {
6549 }
6550 
6551 static void lru_gen_exit_fault(void)
6552 {
6553 }
6554 #endif /* CONFIG_LRU_GEN */
6555 
6556 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
6557 				       unsigned int *flags)
6558 {
6559 	if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
6560 		if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
6561 			return VM_FAULT_SIGSEGV;
6562 		/*
6563 		 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
6564 		 * just treat it like an ordinary read-fault otherwise.
6565 		 */
6566 		if (!is_cow_mapping(vma->vm_flags))
6567 			*flags &= ~FAULT_FLAG_UNSHARE;
6568 	} else if (*flags & FAULT_FLAG_WRITE) {
6569 		/* Write faults on read-only mappings are impossible ... */
6570 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
6571 			return VM_FAULT_SIGSEGV;
6572 		/* ... and FOLL_FORCE only applies to COW mappings. */
6573 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
6574 				 !is_cow_mapping(vma->vm_flags)))
6575 			return VM_FAULT_SIGSEGV;
6576 	}
6577 #ifdef CONFIG_PER_VMA_LOCK
6578 	/*
6579 	 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
6580 	 * the assumption that lock is dropped on VM_FAULT_RETRY.
6581 	 */
6582 	if (WARN_ON_ONCE((*flags &
6583 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
6584 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
6585 		return VM_FAULT_SIGSEGV;
6586 #endif
6587 
6588 	return 0;
6589 }
6590 
6591 /*
6592  * By the time we get here, we already hold either the VMA lock or the
6593  * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
6594  *
6595  * The mmap_lock may have been released depending on flags and our
6596  * return value.  See filemap_fault() and __folio_lock_or_retry().
6597  */
6598 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6599 			   unsigned int flags, struct pt_regs *regs)
6600 {
6601 	/* If the fault handler drops the mmap_lock, vma may be freed */
6602 	struct mm_struct *mm = vma->vm_mm;
6603 	vm_fault_t ret;
6604 	bool is_droppable;
6605 
6606 	__set_current_state(TASK_RUNNING);
6607 
6608 	ret = sanitize_fault_flags(vma, &flags);
6609 	if (ret)
6610 		goto out;
6611 
6612 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6613 					    flags & FAULT_FLAG_INSTRUCTION,
6614 					    flags & FAULT_FLAG_REMOTE)) {
6615 		ret = VM_FAULT_SIGSEGV;
6616 		goto out;
6617 	}
6618 
6619 	is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
6620 
6621 	/*
6622 	 * Enable the memcg OOM handling for faults triggered in user
6623 	 * space.  Kernel faults are handled more gracefully.
6624 	 */
6625 	if (flags & FAULT_FLAG_USER)
6626 		mem_cgroup_enter_user_fault();
6627 
6628 	lru_gen_enter_fault(vma);
6629 
6630 	if (unlikely(is_vm_hugetlb_page(vma)))
6631 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6632 	else
6633 		ret = __handle_mm_fault(vma, address, flags);
6634 
6635 	/*
6636 	 * Warning: It is no longer safe to dereference vma-> after this point,
6637 	 * because mmap_lock might have been dropped by __handle_mm_fault(), so
6638 	 * vma might be destroyed from underneath us.
6639 	 */
6640 
6641 	lru_gen_exit_fault();
6642 
6643 	/* If the mapping is droppable, then errors due to OOM aren't fatal. */
6644 	if (is_droppable)
6645 		ret &= ~VM_FAULT_OOM;
6646 
6647 	if (flags & FAULT_FLAG_USER) {
6648 		mem_cgroup_exit_user_fault();
6649 		/*
6650 		 * The task may have entered a memcg OOM situation but
6651 		 * if the allocation error was handled gracefully (no
6652 		 * VM_FAULT_OOM), there is no need to kill anything.
6653 		 * Just clean up the OOM state peacefully.
6654 		 */
6655 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6656 			mem_cgroup_oom_synchronize(false);
6657 	}
6658 out:
6659 	mm_account_fault(mm, regs, address, flags, ret);
6660 
6661 	return ret;
6662 }
6663 EXPORT_SYMBOL_GPL(handle_mm_fault);
6664 
6665 #ifndef __PAGETABLE_P4D_FOLDED
6666 /*
6667  * Allocate p4d page table.
6668  * We've already handled the fast-path in-line.
6669  */
6670 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6671 {
6672 	p4d_t *new = p4d_alloc_one(mm, address);
6673 	if (!new)
6674 		return -ENOMEM;
6675 
6676 	spin_lock(&mm->page_table_lock);
6677 	if (pgd_present(*pgd)) {	/* Another has populated it */
6678 		p4d_free(mm, new);
6679 	} else {
6680 		smp_wmb(); /* See comment in pmd_install() */
6681 		pgd_populate(mm, pgd, new);
6682 	}
6683 	spin_unlock(&mm->page_table_lock);
6684 	return 0;
6685 }
6686 #endif /* __PAGETABLE_P4D_FOLDED */
6687 
6688 #ifndef __PAGETABLE_PUD_FOLDED
6689 /*
6690  * Allocate page upper directory.
6691  * We've already handled the fast-path in-line.
6692  */
6693 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6694 {
6695 	pud_t *new = pud_alloc_one(mm, address);
6696 	if (!new)
6697 		return -ENOMEM;
6698 
6699 	spin_lock(&mm->page_table_lock);
6700 	if (!p4d_present(*p4d)) {
6701 		mm_inc_nr_puds(mm);
6702 		smp_wmb(); /* See comment in pmd_install() */
6703 		p4d_populate(mm, p4d, new);
6704 	} else	/* Another has populated it */
6705 		pud_free(mm, new);
6706 	spin_unlock(&mm->page_table_lock);
6707 	return 0;
6708 }
6709 #endif /* __PAGETABLE_PUD_FOLDED */
6710 
6711 #ifndef __PAGETABLE_PMD_FOLDED
6712 /*
6713  * Allocate page middle directory.
6714  * We've already handled the fast-path in-line.
6715  */
6716 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6717 {
6718 	spinlock_t *ptl;
6719 	pmd_t *new = pmd_alloc_one(mm, address);
6720 	if (!new)
6721 		return -ENOMEM;
6722 
6723 	ptl = pud_lock(mm, pud);
6724 	if (!pud_present(*pud)) {
6725 		mm_inc_nr_pmds(mm);
6726 		smp_wmb(); /* See comment in pmd_install() */
6727 		pud_populate(mm, pud, new);
6728 	} else {	/* Another has populated it */
6729 		pmd_free(mm, new);
6730 	}
6731 	spin_unlock(ptl);
6732 	return 0;
6733 }
6734 #endif /* __PAGETABLE_PMD_FOLDED */
6735 
6736 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
6737 				     spinlock_t *lock, pte_t *ptep,
6738 				     pgprot_t pgprot, unsigned long pfn_base,
6739 				     unsigned long addr_mask, bool writable,
6740 				     bool special)
6741 {
6742 	args->lock = lock;
6743 	args->ptep = ptep;
6744 	args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
6745 	args->addr_mask = addr_mask;
6746 	args->pgprot = pgprot;
6747 	args->writable = writable;
6748 	args->special = special;
6749 }
6750 
6751 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
6752 {
6753 #ifdef CONFIG_LOCKDEP
6754 	struct file *file = vma->vm_file;
6755 	struct address_space *mapping = file ? file->f_mapping : NULL;
6756 
6757 	if (mapping)
6758 		lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
6759 			       lockdep_is_held(&vma->vm_mm->mmap_lock));
6760 	else
6761 		lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
6762 #endif
6763 }
6764 
6765 /**
6766  * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
6767  * @args: Pointer to struct @follow_pfnmap_args
6768  *
6769  * The caller needs to setup args->vma and args->address to point to the
6770  * virtual address as the target of such lookup.  On a successful return,
6771  * the results will be put into other output fields.
6772  *
6773  * After the caller finished using the fields, the caller must invoke
6774  * another follow_pfnmap_end() to proper releases the locks and resources
6775  * of such look up request.
6776  *
6777  * During the start() and end() calls, the results in @args will be valid
6778  * as proper locks will be held.  After the end() is called, all the fields
6779  * in @follow_pfnmap_args will be invalid to be further accessed.  Further
6780  * use of such information after end() may require proper synchronizations
6781  * by the caller with page table updates, otherwise it can create a
6782  * security bug.
6783  *
6784  * If the PTE maps a refcounted page, callers are responsible to protect
6785  * against invalidation with MMU notifiers; otherwise access to the PFN at
6786  * a later point in time can trigger use-after-free.
6787  *
6788  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
6789  * should be taken for read, and the mmap semaphore cannot be released
6790  * before the end() is invoked.
6791  *
6792  * This function must not be used to modify PTE content.
6793  *
6794  * Return: zero on success, negative otherwise.
6795  */
6796 int follow_pfnmap_start(struct follow_pfnmap_args *args)
6797 {
6798 	struct vm_area_struct *vma = args->vma;
6799 	unsigned long address = args->address;
6800 	struct mm_struct *mm = vma->vm_mm;
6801 	spinlock_t *lock;
6802 	pgd_t *pgdp;
6803 	p4d_t *p4dp, p4d;
6804 	pud_t *pudp, pud;
6805 	pmd_t *pmdp, pmd;
6806 	pte_t *ptep, pte;
6807 
6808 	pfnmap_lockdep_assert(vma);
6809 
6810 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6811 		goto out;
6812 
6813 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6814 		goto out;
6815 retry:
6816 	pgdp = pgd_offset(mm, address);
6817 	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
6818 		goto out;
6819 
6820 	p4dp = p4d_offset(pgdp, address);
6821 	p4d = p4dp_get(p4dp);
6822 	if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
6823 		goto out;
6824 
6825 	pudp = pud_offset(p4dp, address);
6826 	pud = pudp_get(pudp);
6827 	if (!pud_present(pud))
6828 		goto out;
6829 	if (pud_leaf(pud)) {
6830 		lock = pud_lock(mm, pudp);
6831 		pud = pudp_get(pudp);
6832 
6833 		if (unlikely(!pud_present(pud))) {
6834 			spin_unlock(lock);
6835 			goto out;
6836 		} else if (unlikely(!pud_leaf(pud))) {
6837 			spin_unlock(lock);
6838 			goto retry;
6839 		}
6840 		pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
6841 				  pud_pfn(pud), PUD_MASK, pud_write(pud),
6842 				  pud_special(pud));
6843 		return 0;
6844 	}
6845 
6846 	pmdp = pmd_offset(pudp, address);
6847 	pmd = pmdp_get_lockless(pmdp);
6848 	if (!pmd_present(pmd))
6849 		goto out;
6850 	if (pmd_leaf(pmd)) {
6851 		lock = pmd_lock(mm, pmdp);
6852 		pmd = pmdp_get(pmdp);
6853 
6854 		if (unlikely(!pmd_present(pmd))) {
6855 			spin_unlock(lock);
6856 			goto out;
6857 		} else if (unlikely(!pmd_leaf(pmd))) {
6858 			spin_unlock(lock);
6859 			goto retry;
6860 		}
6861 		pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
6862 				  pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
6863 				  pmd_special(pmd));
6864 		return 0;
6865 	}
6866 
6867 	ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
6868 	if (!ptep)
6869 		goto out;
6870 	pte = ptep_get(ptep);
6871 	if (!pte_present(pte))
6872 		goto unlock;
6873 	pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
6874 			  pte_pfn(pte), PAGE_MASK, pte_write(pte),
6875 			  pte_special(pte));
6876 	return 0;
6877 unlock:
6878 	pte_unmap_unlock(ptep, lock);
6879 out:
6880 	return -EINVAL;
6881 }
6882 EXPORT_SYMBOL_GPL(follow_pfnmap_start);
6883 
6884 /**
6885  * follow_pfnmap_end(): End a follow_pfnmap_start() process
6886  * @args: Pointer to struct @follow_pfnmap_args
6887  *
6888  * Must be used in pair of follow_pfnmap_start().  See the start() function
6889  * above for more information.
6890  */
6891 void follow_pfnmap_end(struct follow_pfnmap_args *args)
6892 {
6893 	if (args->lock)
6894 		spin_unlock(args->lock);
6895 	if (args->ptep)
6896 		pte_unmap(args->ptep);
6897 }
6898 EXPORT_SYMBOL_GPL(follow_pfnmap_end);
6899 
6900 #ifdef CONFIG_HAVE_IOREMAP_PROT
6901 /**
6902  * generic_access_phys - generic implementation for iomem mmap access
6903  * @vma: the vma to access
6904  * @addr: userspace address, not relative offset within @vma
6905  * @buf: buffer to read/write
6906  * @len: length of transfer
6907  * @write: set to FOLL_WRITE when writing, otherwise reading
6908  *
6909  * This is a generic implementation for &vm_operations_struct.access for an
6910  * iomem mapping. This callback is used by access_process_vm() when the @vma is
6911  * not page based.
6912  */
6913 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6914 			void *buf, int len, int write)
6915 {
6916 	resource_size_t phys_addr;
6917 	pgprot_t prot = __pgprot(0);
6918 	void __iomem *maddr;
6919 	int offset = offset_in_page(addr);
6920 	int ret = -EINVAL;
6921 	bool writable;
6922 	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
6923 
6924 retry:
6925 	if (follow_pfnmap_start(&args))
6926 		return -EINVAL;
6927 	prot = args.pgprot;
6928 	phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
6929 	writable = args.writable;
6930 	follow_pfnmap_end(&args);
6931 
6932 	if ((write & FOLL_WRITE) && !writable)
6933 		return -EINVAL;
6934 
6935 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6936 	if (!maddr)
6937 		return -ENOMEM;
6938 
6939 	if (follow_pfnmap_start(&args))
6940 		goto out_unmap;
6941 
6942 	if ((pgprot_val(prot) != pgprot_val(args.pgprot)) ||
6943 	    (phys_addr != (args.pfn << PAGE_SHIFT)) ||
6944 	    (writable != args.writable)) {
6945 		follow_pfnmap_end(&args);
6946 		iounmap(maddr);
6947 		goto retry;
6948 	}
6949 
6950 	if (write)
6951 		memcpy_toio(maddr + offset, buf, len);
6952 	else
6953 		memcpy_fromio(buf, maddr + offset, len);
6954 	ret = len;
6955 	follow_pfnmap_end(&args);
6956 out_unmap:
6957 	iounmap(maddr);
6958 
6959 	return ret;
6960 }
6961 EXPORT_SYMBOL_GPL(generic_access_phys);
6962 #endif
6963 
6964 /*
6965  * Access another process' address space as given in mm.
6966  */
6967 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
6968 			      void *buf, int len, unsigned int gup_flags)
6969 {
6970 	void *old_buf = buf;
6971 	int write = gup_flags & FOLL_WRITE;
6972 
6973 	if (mmap_read_lock_killable(mm))
6974 		return 0;
6975 
6976 	/* Untag the address before looking up the VMA */
6977 	addr = untagged_addr_remote(mm, addr);
6978 
6979 	/* Avoid triggering the temporary warning in __get_user_pages */
6980 	if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
6981 		return 0;
6982 
6983 	/* ignore errors, just check how much was successfully transferred */
6984 	while (len) {
6985 		int bytes, offset;
6986 		void *maddr;
6987 		struct folio *folio;
6988 		struct vm_area_struct *vma = NULL;
6989 		struct page *page = get_user_page_vma_remote(mm, addr,
6990 							     gup_flags, &vma);
6991 
6992 		if (IS_ERR(page)) {
6993 			/* We might need to expand the stack to access it */
6994 			vma = vma_lookup(mm, addr);
6995 			if (!vma) {
6996 				vma = expand_stack(mm, addr);
6997 
6998 				/* mmap_lock was dropped on failure */
6999 				if (!vma)
7000 					return buf - old_buf;
7001 
7002 				/* Try again if stack expansion worked */
7003 				continue;
7004 			}
7005 
7006 			/*
7007 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
7008 			 * we can access using slightly different code.
7009 			 */
7010 			bytes = 0;
7011 #ifdef CONFIG_HAVE_IOREMAP_PROT
7012 			if (vma->vm_ops && vma->vm_ops->access)
7013 				bytes = vma->vm_ops->access(vma, addr, buf,
7014 							    len, write);
7015 #endif
7016 			if (bytes <= 0)
7017 				break;
7018 		} else {
7019 			folio = page_folio(page);
7020 			bytes = len;
7021 			offset = addr & (PAGE_SIZE-1);
7022 			if (bytes > PAGE_SIZE-offset)
7023 				bytes = PAGE_SIZE-offset;
7024 
7025 			maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
7026 			if (write) {
7027 				copy_to_user_page(vma, page, addr,
7028 						  maddr + offset, buf, bytes);
7029 				folio_mark_dirty_lock(folio);
7030 			} else {
7031 				copy_from_user_page(vma, page, addr,
7032 						    buf, maddr + offset, bytes);
7033 			}
7034 			folio_release_kmap(folio, maddr);
7035 		}
7036 		len -= bytes;
7037 		buf += bytes;
7038 		addr += bytes;
7039 	}
7040 	mmap_read_unlock(mm);
7041 
7042 	return buf - old_buf;
7043 }
7044 
7045 /**
7046  * access_remote_vm - access another process' address space
7047  * @mm:		the mm_struct of the target address space
7048  * @addr:	start address to access
7049  * @buf:	source or destination buffer
7050  * @len:	number of bytes to transfer
7051  * @gup_flags:	flags modifying lookup behaviour
7052  *
7053  * The caller must hold a reference on @mm.
7054  *
7055  * Return: number of bytes copied from source to destination.
7056  */
7057 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
7058 		void *buf, int len, unsigned int gup_flags)
7059 {
7060 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
7061 }
7062 
7063 /*
7064  * Access another process' address space.
7065  * Source/target buffer must be kernel space,
7066  * Do not walk the page table directly, use get_user_pages
7067  */
7068 int access_process_vm(struct task_struct *tsk, unsigned long addr,
7069 		void *buf, int len, unsigned int gup_flags)
7070 {
7071 	struct mm_struct *mm;
7072 	int ret;
7073 
7074 	mm = get_task_mm(tsk);
7075 	if (!mm)
7076 		return 0;
7077 
7078 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
7079 
7080 	mmput(mm);
7081 
7082 	return ret;
7083 }
7084 EXPORT_SYMBOL_GPL(access_process_vm);
7085 
7086 #ifdef CONFIG_BPF_SYSCALL
7087 /*
7088  * Copy a string from another process's address space as given in mm.
7089  * If there is any error return -EFAULT.
7090  */
7091 static int __copy_remote_vm_str(struct mm_struct *mm, unsigned long addr,
7092 				void *buf, int len, unsigned int gup_flags)
7093 {
7094 	void *old_buf = buf;
7095 	int err = 0;
7096 
7097 	*(char *)buf = '\0';
7098 
7099 	if (mmap_read_lock_killable(mm))
7100 		return -EFAULT;
7101 
7102 	addr = untagged_addr_remote(mm, addr);
7103 
7104 	/* Avoid triggering the temporary warning in __get_user_pages */
7105 	if (!vma_lookup(mm, addr)) {
7106 		err = -EFAULT;
7107 		goto out;
7108 	}
7109 
7110 	while (len) {
7111 		int bytes, offset, retval;
7112 		void *maddr;
7113 		struct folio *folio;
7114 		struct page *page;
7115 		struct vm_area_struct *vma = NULL;
7116 
7117 		page = get_user_page_vma_remote(mm, addr, gup_flags, &vma);
7118 		if (IS_ERR(page)) {
7119 			/*
7120 			 * Treat as a total failure for now until we decide how
7121 			 * to handle the CONFIG_HAVE_IOREMAP_PROT case and
7122 			 * stack expansion.
7123 			 */
7124 			*(char *)buf = '\0';
7125 			err = -EFAULT;
7126 			goto out;
7127 		}
7128 
7129 		folio = page_folio(page);
7130 		bytes = len;
7131 		offset = addr & (PAGE_SIZE - 1);
7132 		if (bytes > PAGE_SIZE - offset)
7133 			bytes = PAGE_SIZE - offset;
7134 
7135 		maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
7136 		retval = strscpy(buf, maddr + offset, bytes);
7137 		if (retval >= 0) {
7138 			/* Found the end of the string */
7139 			buf += retval;
7140 			folio_release_kmap(folio, maddr);
7141 			break;
7142 		}
7143 
7144 		buf += bytes - 1;
7145 		/*
7146 		 * Because strscpy always NUL terminates we need to
7147 		 * copy the last byte in the page if we are going to
7148 		 * load more pages
7149 		 */
7150 		if (bytes != len) {
7151 			addr += bytes - 1;
7152 			copy_from_user_page(vma, page, addr, buf, maddr + (PAGE_SIZE - 1), 1);
7153 			buf += 1;
7154 			addr += 1;
7155 		}
7156 		len -= bytes;
7157 
7158 		folio_release_kmap(folio, maddr);
7159 	}
7160 
7161 out:
7162 	mmap_read_unlock(mm);
7163 	if (err)
7164 		return err;
7165 	return buf - old_buf;
7166 }
7167 
7168 /**
7169  * copy_remote_vm_str - copy a string from another process's address space.
7170  * @tsk:	the task of the target address space
7171  * @addr:	start address to read from
7172  * @buf:	destination buffer
7173  * @len:	number of bytes to copy
7174  * @gup_flags:	flags modifying lookup behaviour
7175  *
7176  * The caller must hold a reference on @mm.
7177  *
7178  * Return: number of bytes copied from @addr (source) to @buf (destination);
7179  * not including the trailing NUL. Always guaranteed to leave NUL-terminated
7180  * buffer. On any error, return -EFAULT.
7181  */
7182 int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
7183 		       void *buf, int len, unsigned int gup_flags)
7184 {
7185 	struct mm_struct *mm;
7186 	int ret;
7187 
7188 	if (unlikely(len == 0))
7189 		return 0;
7190 
7191 	mm = get_task_mm(tsk);
7192 	if (!mm) {
7193 		*(char *)buf = '\0';
7194 		return -EFAULT;
7195 	}
7196 
7197 	ret = __copy_remote_vm_str(mm, addr, buf, len, gup_flags);
7198 
7199 	mmput(mm);
7200 
7201 	return ret;
7202 }
7203 EXPORT_SYMBOL_GPL(copy_remote_vm_str);
7204 #endif /* CONFIG_BPF_SYSCALL */
7205 
7206 /*
7207  * Print the name of a VMA.
7208  */
7209 void print_vma_addr(char *prefix, unsigned long ip)
7210 {
7211 	struct mm_struct *mm = current->mm;
7212 	struct vm_area_struct *vma;
7213 
7214 	/*
7215 	 * we might be running from an atomic context so we cannot sleep
7216 	 */
7217 	if (!mmap_read_trylock(mm))
7218 		return;
7219 
7220 	vma = vma_lookup(mm, ip);
7221 	if (vma && vma->vm_file) {
7222 		struct file *f = vma->vm_file;
7223 		ip -= vma->vm_start;
7224 		ip += vma->vm_pgoff << PAGE_SHIFT;
7225 		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
7226 				vma->vm_start,
7227 				vma->vm_end - vma->vm_start);
7228 	}
7229 	mmap_read_unlock(mm);
7230 }
7231 
7232 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
7233 void __might_fault(const char *file, int line)
7234 {
7235 	if (pagefault_disabled())
7236 		return;
7237 	__might_sleep(file, line);
7238 	if (current->mm)
7239 		might_lock_read(&current->mm->mmap_lock);
7240 }
7241 EXPORT_SYMBOL(__might_fault);
7242 #endif
7243 
7244 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
7245 /*
7246  * Process all subpages of the specified huge page with the specified
7247  * operation.  The target subpage will be processed last to keep its
7248  * cache lines hot.
7249  */
7250 static inline int process_huge_page(
7251 	unsigned long addr_hint, unsigned int nr_pages,
7252 	int (*process_subpage)(unsigned long addr, int idx, void *arg),
7253 	void *arg)
7254 {
7255 	int i, n, base, l, ret;
7256 	unsigned long addr = addr_hint &
7257 		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
7258 
7259 	/* Process target subpage last to keep its cache lines hot */
7260 	might_sleep();
7261 	n = (addr_hint - addr) / PAGE_SIZE;
7262 	if (2 * n <= nr_pages) {
7263 		/* If target subpage in first half of huge page */
7264 		base = 0;
7265 		l = n;
7266 		/* Process subpages at the end of huge page */
7267 		for (i = nr_pages - 1; i >= 2 * n; i--) {
7268 			cond_resched();
7269 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
7270 			if (ret)
7271 				return ret;
7272 		}
7273 	} else {
7274 		/* If target subpage in second half of huge page */
7275 		base = nr_pages - 2 * (nr_pages - n);
7276 		l = nr_pages - n;
7277 		/* Process subpages at the begin of huge page */
7278 		for (i = 0; i < base; i++) {
7279 			cond_resched();
7280 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
7281 			if (ret)
7282 				return ret;
7283 		}
7284 	}
7285 	/*
7286 	 * Process remaining subpages in left-right-left-right pattern
7287 	 * towards the target subpage
7288 	 */
7289 	for (i = 0; i < l; i++) {
7290 		int left_idx = base + i;
7291 		int right_idx = base + 2 * l - 1 - i;
7292 
7293 		cond_resched();
7294 		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
7295 		if (ret)
7296 			return ret;
7297 		cond_resched();
7298 		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
7299 		if (ret)
7300 			return ret;
7301 	}
7302 	return 0;
7303 }
7304 
7305 static void clear_contig_highpages(struct page *page, unsigned long addr,
7306 				   unsigned int nr_pages)
7307 {
7308 	unsigned int i, count;
7309 	/*
7310 	 * When clearing we want to operate on the largest extent possible to
7311 	 * allow for architecture specific extent based optimizations.
7312 	 *
7313 	 * However, since clear_user_highpages() (and primitives clear_user_pages(),
7314 	 * clear_pages()), do not call cond_resched(), limit the unit size when
7315 	 * running under non-preemptible scheduling models.
7316 	 */
7317 	const unsigned int unit = preempt_model_preemptible() ?
7318 				   nr_pages : PROCESS_PAGES_NON_PREEMPT_BATCH;
7319 
7320 	might_sleep();
7321 
7322 	for (i = 0; i < nr_pages; i += count) {
7323 		cond_resched();
7324 
7325 		count = min(unit, nr_pages - i);
7326 		clear_user_highpages(page + i, addr + i * PAGE_SIZE, count);
7327 	}
7328 }
7329 
7330 /*
7331  * When zeroing a folio, we want to differentiate between pages in the
7332  * vicinity of the faulting address where we have spatial and temporal
7333  * locality, and those far away where we don't.
7334  *
7335  * Use a radius of 2 for determining the local neighbourhood.
7336  */
7337 #define FOLIO_ZERO_LOCALITY_RADIUS	2
7338 
7339 /**
7340  * folio_zero_user - Zero a folio which will be mapped to userspace.
7341  * @folio: The folio to zero.
7342  * @addr_hint: The address accessed by the user or the base address.
7343  */
7344 void folio_zero_user(struct folio *folio, unsigned long addr_hint)
7345 {
7346 	const unsigned long base_addr = ALIGN_DOWN(addr_hint, folio_size(folio));
7347 	const long fault_idx = (addr_hint - base_addr) / PAGE_SIZE;
7348 	const struct range pg = DEFINE_RANGE(0, folio_nr_pages(folio) - 1);
7349 	const long radius = FOLIO_ZERO_LOCALITY_RADIUS;
7350 	struct range r[3];
7351 	int i;
7352 
7353 	/*
7354 	 * Faulting page and its immediate neighbourhood. Will be cleared at the
7355 	 * end to keep its cachelines hot.
7356 	 */
7357 	r[2] = DEFINE_RANGE(fault_idx - radius < (long)pg.start ? pg.start : fault_idx - radius,
7358 			    fault_idx + radius > (long)pg.end   ? pg.end   : fault_idx + radius);
7359 
7360 
7361 	/* Region to the left of the fault */
7362 	r[1] = DEFINE_RANGE(pg.start, r[2].start - 1);
7363 
7364 	/* Region to the right of the fault: always valid for the common fault_idx=0 case. */
7365 	r[0] = DEFINE_RANGE(r[2].end + 1, pg.end);
7366 
7367 	for (i = 0; i < ARRAY_SIZE(r); i++) {
7368 		const unsigned long addr = base_addr + r[i].start * PAGE_SIZE;
7369 		const long nr_pages = (long)range_len(&r[i]);
7370 		struct page *page = folio_page(folio, r[i].start);
7371 
7372 		if (nr_pages > 0)
7373 			clear_contig_highpages(page, addr, nr_pages);
7374 	}
7375 }
7376 
7377 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
7378 				   unsigned long addr_hint,
7379 				   struct vm_area_struct *vma,
7380 				   unsigned int nr_pages)
7381 {
7382 	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
7383 	struct page *dst_page;
7384 	struct page *src_page;
7385 	int i;
7386 
7387 	for (i = 0; i < nr_pages; i++) {
7388 		dst_page = folio_page(dst, i);
7389 		src_page = folio_page(src, i);
7390 
7391 		cond_resched();
7392 		if (copy_mc_user_highpage(dst_page, src_page,
7393 					  addr + i*PAGE_SIZE, vma))
7394 			return -EHWPOISON;
7395 	}
7396 	return 0;
7397 }
7398 
7399 struct copy_subpage_arg {
7400 	struct folio *dst;
7401 	struct folio *src;
7402 	struct vm_area_struct *vma;
7403 };
7404 
7405 static int copy_subpage(unsigned long addr, int idx, void *arg)
7406 {
7407 	struct copy_subpage_arg *copy_arg = arg;
7408 	struct page *dst = folio_page(copy_arg->dst, idx);
7409 	struct page *src = folio_page(copy_arg->src, idx);
7410 
7411 	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
7412 		return -EHWPOISON;
7413 	return 0;
7414 }
7415 
7416 int copy_user_large_folio(struct folio *dst, struct folio *src,
7417 			  unsigned long addr_hint, struct vm_area_struct *vma)
7418 {
7419 	unsigned int nr_pages = folio_nr_pages(dst);
7420 	struct copy_subpage_arg arg = {
7421 		.dst = dst,
7422 		.src = src,
7423 		.vma = vma,
7424 	};
7425 
7426 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
7427 		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
7428 
7429 	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
7430 }
7431 
7432 long copy_folio_from_user(struct folio *dst_folio,
7433 			   const void __user *usr_src,
7434 			   bool allow_pagefault)
7435 {
7436 	void *kaddr;
7437 	unsigned long i, rc = 0;
7438 	unsigned int nr_pages = folio_nr_pages(dst_folio);
7439 	unsigned long ret_val = nr_pages * PAGE_SIZE;
7440 	struct page *subpage;
7441 
7442 	for (i = 0; i < nr_pages; i++) {
7443 		subpage = folio_page(dst_folio, i);
7444 		kaddr = kmap_local_page(subpage);
7445 		if (!allow_pagefault)
7446 			pagefault_disable();
7447 		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
7448 		if (!allow_pagefault)
7449 			pagefault_enable();
7450 		kunmap_local(kaddr);
7451 
7452 		ret_val -= (PAGE_SIZE - rc);
7453 		if (rc)
7454 			break;
7455 
7456 		flush_dcache_page(subpage);
7457 
7458 		cond_resched();
7459 	}
7460 	return ret_val;
7461 }
7462 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
7463 
7464 #if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
7465 
7466 static struct kmem_cache *page_ptl_cachep;
7467 
7468 void __init ptlock_cache_init(void)
7469 {
7470 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
7471 			SLAB_PANIC, NULL);
7472 }
7473 
7474 bool ptlock_alloc(struct ptdesc *ptdesc)
7475 {
7476 	spinlock_t *ptl;
7477 
7478 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
7479 	if (!ptl)
7480 		return false;
7481 	ptdesc->ptl = ptl;
7482 	return true;
7483 }
7484 
7485 void ptlock_free(struct ptdesc *ptdesc)
7486 {
7487 	if (ptdesc->ptl)
7488 		kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
7489 }
7490 #endif
7491 
7492 void vma_pgtable_walk_begin(struct vm_area_struct *vma)
7493 {
7494 	if (is_vm_hugetlb_page(vma))
7495 		hugetlb_vma_lock_read(vma);
7496 }
7497 
7498 void vma_pgtable_walk_end(struct vm_area_struct *vma)
7499 {
7500 	if (is_vm_hugetlb_page(vma))
7501 		hugetlb_vma_unlock_read(vma);
7502 }
7503