xref: /linux/mm/memory.c (revision 62c65fd740e979a3967db08971b93aefcec510d4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/numa_balancing.h>
47 #include <linux/sched/task.h>
48 #include <linux/hugetlb.h>
49 #include <linux/mman.h>
50 #include <linux/swap.h>
51 #include <linux/highmem.h>
52 #include <linux/pagemap.h>
53 #include <linux/memremap.h>
54 #include <linux/kmsan.h>
55 #include <linux/ksm.h>
56 #include <linux/rmap.h>
57 #include <linux/export.h>
58 #include <linux/delayacct.h>
59 #include <linux/init.h>
60 #include <linux/writeback.h>
61 #include <linux/memcontrol.h>
62 #include <linux/mmu_notifier.h>
63 #include <linux/leafops.h>
64 #include <linux/elf.h>
65 #include <linux/gfp.h>
66 #include <linux/migrate.h>
67 #include <linux/string.h>
68 #include <linux/shmem_fs.h>
69 #include <linux/memory-tiers.h>
70 #include <linux/debugfs.h>
71 #include <linux/userfaultfd_k.h>
72 #include <linux/dax.h>
73 #include <linux/oom.h>
74 #include <linux/numa.h>
75 #include <linux/perf_event.h>
76 #include <linux/ptrace.h>
77 #include <linux/vmalloc.h>
78 #include <linux/sched/sysctl.h>
79 #include <linux/pgalloc.h>
80 #include <linux/uaccess.h>
81 
82 #include <trace/events/kmem.h>
83 
84 #include <asm/io.h>
85 #include <asm/mmu_context.h>
86 #include <asm/tlb.h>
87 #include <asm/tlbflush.h>
88 
89 #include "pgalloc-track.h"
90 #include "internal.h"
91 #include "swap.h"
92 
93 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
94 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
95 #endif
96 
97 static vm_fault_t do_fault(struct vm_fault *vmf);
98 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
99 static bool vmf_pte_changed(struct vm_fault *vmf);
100 
101 /*
102  * Return true if the original pte was a uffd-wp pte marker (so the pte was
103  * wr-protected).
104  */
105 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
106 {
107 	if (!userfaultfd_wp(vmf->vma))
108 		return false;
109 	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
110 		return false;
111 
112 	return pte_is_uffd_wp_marker(vmf->orig_pte);
113 }
114 
115 /*
116  * Randomize the address space (stacks, mmaps, brk, etc.).
117  *
118  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
119  *   as ancient (libc5 based) binaries can segfault. )
120  */
121 int randomize_va_space __read_mostly =
122 #ifdef CONFIG_COMPAT_BRK
123 					1;
124 #else
125 					2;
126 #endif
127 
128 static const struct ctl_table mmu_sysctl_table[] = {
129 	{
130 		.procname	= "randomize_va_space",
131 		.data		= &randomize_va_space,
132 		.maxlen		= sizeof(int),
133 		.mode		= 0644,
134 		.proc_handler	= proc_dointvec,
135 	},
136 };
137 
138 static int __init init_mm_sysctl(void)
139 {
140 	register_sysctl_init("kernel", mmu_sysctl_table);
141 	return 0;
142 }
143 
144 subsys_initcall(init_mm_sysctl);
145 
146 #ifndef arch_wants_old_prefaulted_pte
147 static inline bool arch_wants_old_prefaulted_pte(void)
148 {
149 	/*
150 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
151 	 * some architectures, even if it's performed in hardware. By
152 	 * default, "false" means prefaulted entries will be 'young'.
153 	 */
154 	return false;
155 }
156 #endif
157 
158 static int __init disable_randmaps(char *s)
159 {
160 	randomize_va_space = 0;
161 	return 1;
162 }
163 __setup("norandmaps", disable_randmaps);
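
/*
 * Illustrative usage of the two knobs above (a sketch, not part of this
 * file's code): randomize_va_space is exposed via the sysctl registered
 * in init_mm_sysctl(), and "norandmaps" on the kernel command line is
 * equivalent to setting it to 0 at boot:
 *
 *	# sysctl kernel.randomize_va_space=2		# full randomization
 *	# echo 0 > /proc/sys/kernel/randomize_va_space	# disable ASLR
 *
 * 0 disables randomization, 1 randomizes stack/mmap/VDSO but not brk
 * (the CONFIG_COMPAT_BRK default), and 2 additionally randomizes brk.
 */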
164 
165 unsigned long highest_memmap_pfn __read_mostly;
166 
167 void mm_trace_rss_stat(struct mm_struct *mm, int member)
168 {
169 	trace_rss_stat(mm, member);
170 }
171 
172 /*
173  * Note: this doesn't free the actual pages themselves. That
174  * has been handled earlier when unmapping all the memory regions.
175  */
176 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
177 			   unsigned long addr)
178 {
179 	pgtable_t token = pmd_pgtable(*pmd);
180 	pmd_clear(pmd);
181 	pte_free_tlb(tlb, token, addr);
182 	mm_dec_nr_ptes(tlb->mm);
183 }
184 
185 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
186 				unsigned long addr, unsigned long end,
187 				unsigned long floor, unsigned long ceiling)
188 {
189 	pmd_t *pmd;
190 	unsigned long next;
191 	unsigned long start;
192 
193 	start = addr;
194 	pmd = pmd_offset(pud, addr);
195 	do {
196 		next = pmd_addr_end(addr, end);
197 		if (pmd_none_or_clear_bad(pmd))
198 			continue;
199 		free_pte_range(tlb, pmd, addr);
200 	} while (pmd++, addr = next, addr != end);
201 
202 	start &= PUD_MASK;
203 	if (start < floor)
204 		return;
205 	if (ceiling) {
206 		ceiling &= PUD_MASK;
207 		if (!ceiling)
208 			return;
209 	}
210 	if (end - 1 > ceiling - 1)
211 		return;
212 
213 	pmd = pmd_offset(pud, start);
214 	pud_clear(pud);
215 	pmd_free_tlb(tlb, pmd, start);
216 	mm_dec_nr_pmds(tlb->mm);
217 }
218 
219 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
220 				unsigned long addr, unsigned long end,
221 				unsigned long floor, unsigned long ceiling)
222 {
223 	pud_t *pud;
224 	unsigned long next;
225 	unsigned long start;
226 
227 	start = addr;
228 	pud = pud_offset(p4d, addr);
229 	do {
230 		next = pud_addr_end(addr, end);
231 		if (pud_none_or_clear_bad(pud))
232 			continue;
233 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
234 	} while (pud++, addr = next, addr != end);
235 
236 	start &= P4D_MASK;
237 	if (start < floor)
238 		return;
239 	if (ceiling) {
240 		ceiling &= P4D_MASK;
241 		if (!ceiling)
242 			return;
243 	}
244 	if (end - 1 > ceiling - 1)
245 		return;
246 
247 	pud = pud_offset(p4d, start);
248 	p4d_clear(p4d);
249 	pud_free_tlb(tlb, pud, start);
250 	mm_dec_nr_puds(tlb->mm);
251 }
252 
253 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
254 				unsigned long addr, unsigned long end,
255 				unsigned long floor, unsigned long ceiling)
256 {
257 	p4d_t *p4d;
258 	unsigned long next;
259 	unsigned long start;
260 
261 	start = addr;
262 	p4d = p4d_offset(pgd, addr);
263 	do {
264 		next = p4d_addr_end(addr, end);
265 		if (p4d_none_or_clear_bad(p4d))
266 			continue;
267 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
268 	} while (p4d++, addr = next, addr != end);
269 
270 	start &= PGDIR_MASK;
271 	if (start < floor)
272 		return;
273 	if (ceiling) {
274 		ceiling &= PGDIR_MASK;
275 		if (!ceiling)
276 			return;
277 	}
278 	if (end - 1 > ceiling - 1)
279 		return;
280 
281 	p4d = p4d_offset(pgd, start);
282 	pgd_clear(pgd);
283 	p4d_free_tlb(tlb, p4d, start);
284 }
285 
286 /**
287  * free_pgd_range - Unmap and free page tables in the range
288  * @tlb: the mmu_gather containing pending TLB flush info
289  * @addr: virtual address start
290  * @end: virtual address end
291  * @floor: lowest address boundary
292  * @ceiling: highest address boundary
293  *
294  * This function tears down all user-level page tables in the
295  * specified virtual address range [@addr..@end). It is part of
296  * the memory unmap flow.
297  */
298 void free_pgd_range(struct mmu_gather *tlb,
299 			unsigned long addr, unsigned long end,
300 			unsigned long floor, unsigned long ceiling)
301 {
302 	pgd_t *pgd;
303 	unsigned long next;
304 
305 	/*
306 	 * The next few lines have given us lots of grief...
307 	 *
308 	 * Why are we testing PMD* at this top level?  Because often
309 	 * there will be no work to do at all, and we'd prefer not to
310 	 * go all the way down to the bottom just to discover that.
311 	 *
312 	 * Why all these "- 1"s?  Because 0 represents both the bottom
313 	 * of the address space and the top of it (using -1 for the
314 	 * top wouldn't help much: the masks would do the wrong thing).
315 	 * The rule is that addr 0 and floor 0 refer to the bottom of
316  * the address space, but end 0 and ceiling 0 refer to the top.
317 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
318 	 * that end 0 case should be mythical).
319 	 *
320 	 * Wherever addr is brought up or ceiling brought down, we must
321 	 * be careful to reject "the opposite 0" before it confuses the
322 	 * subsequent tests.  But what about where end is brought down
323  * by PMD_SIZE below? No, end can't go down to 0 there.
324 	 *
325 	 * Whereas we round start (addr) and ceiling down, by different
326 	 * masks at different levels, in order to test whether a table
327  * now has no other vmas using it, so it can be freed, we don't
328 	 * bother to round floor or end up - the tests don't need that.
329 	 */
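	/*
	 * A worked example of the rounding below (illustrative, assuming a
	 * 2MB PMD_SIZE): addr = 0x12345000 rounds down to 0x12200000. With
	 * floor = 0x12000000 that is still >= floor, so nothing changes;
	 * with floor = 0x12300000, addr is first bumped by PMD_SIZE to
	 * 0x12400000. The "if (!addr)" after the bump is the wrap-around
	 * check rejecting "the opposite 0" described above.
	 */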
330 
331 	addr &= PMD_MASK;
332 	if (addr < floor) {
333 		addr += PMD_SIZE;
334 		if (!addr)
335 			return;
336 	}
337 	if (ceiling) {
338 		ceiling &= PMD_MASK;
339 		if (!ceiling)
340 			return;
341 	}
342 	if (end - 1 > ceiling - 1)
343 		end -= PMD_SIZE;
344 	if (addr > end - 1)
345 		return;
346 	/*
347 	 * We add page table cache pages with PAGE_SIZE granularity (see
348 	 * pte_free_tlb()), so flush the tlb if we need to.
349 	 */
350 	tlb_change_page_size(tlb, PAGE_SIZE);
351 	pgd = pgd_offset(tlb->mm, addr);
352 	do {
353 		next = pgd_addr_end(addr, end);
354 		if (pgd_none_or_clear_bad(pgd))
355 			continue;
356 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
357 	} while (pgd++, addr = next, addr != end);
358 }
359 
360 /**
361  * free_pgtables() - Free a range of page tables
362  * @tlb: The mmu gather
363  * @unmap: The unmap_desc
364  *
365  * Note: pg_start and pg_end indicate the absolute range of page tables to be
366  * removed.  This can differ from the vma range on some archs, which may have
367  * mappings that need to be removed outside the vmas; prev->vm_end and
368  * next->vm_start are often used as these bounds.
369  *
370  * vma_end differs from pg_end when a dup_mmap() failed and the tree contains
371  * data unrelated to the mm_struct being torn down.
372  */
373 void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
374 {
375 	struct unlink_vma_file_batch vb;
376 	struct ma_state *mas = unmap->mas;
377 	struct vm_area_struct *vma = unmap->first;
378 
379 	/*
380 	 * Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
381 	 * may be 0.  Underflow is expected in this case.  Otherwise the
382 	 * pagetable end is exclusive.  vma_end is exclusive.  The last vma
383 	 * address should never be larger than the pagetable end.
384 	 */
385 	WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);
386 
387 	tlb_free_vmas(tlb);
388 
389 	do {
390 		unsigned long addr = vma->vm_start;
391 		struct vm_area_struct *next;
392 
393 		next = mas_find(mas, unmap->tree_end - 1);
394 
395 		/*
396 		 * Hide vma from rmap and truncate_pagecache before freeing
397 		 * pgtables
398 		 */
399 		if (unmap->mm_wr_locked)
400 			vma_start_write(vma);
401 		unlink_anon_vmas(vma);
402 
403 		unlink_file_vma_batch_init(&vb);
404 		unlink_file_vma_batch_add(&vb, vma);
405 
406 		/*
407 		 * Optimization: gather nearby vmas into one call down
408 		 */
409 		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
410 			vma = next;
411 			next = mas_find(mas, unmap->tree_end - 1);
412 			if (unmap->mm_wr_locked)
413 				vma_start_write(vma);
414 			unlink_anon_vmas(vma);
415 			unlink_file_vma_batch_add(&vb, vma);
416 		}
417 		unlink_file_vma_batch_final(&vb);
418 
419 		free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start,
420 			       next ? next->vm_start : unmap->pg_end);
421 		vma = next;
422 	} while (vma);
423 }
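
/*
 * Illustrative example of the bounds described above (not an exhaustive
 * list of callers): when unmapping a single vma V with neighbours P and N,
 * the caller typically passes pg_start = P->vm_end and pg_end = N->vm_start,
 * so page tables are only freed in the hole that no other vma can still be
 * using; with no neighbours at all, the bounds widen towards
 * FIRST_USER_ADDRESS and USER_PGTABLES_CEILING.
 */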
424 
425 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
426 {
427 	spinlock_t *ptl = pmd_lock(mm, pmd);
428 
429 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
430 		mm_inc_nr_ptes(mm);
431 		/*
432 		 * Ensure all pte setup (eg. pte page lock and page clearing) is
433 		 * visible before the pte is made visible to other CPUs by being
434 		 * put into page tables.
435 		 *
436 		 * The other side of the story is the pointer chasing in the page
437 		 * table walking code (when walking the page table without locking;
438 		 * ie. most of the time). Fortunately, these data accesses consist
439 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
440 		 * being the notable exception) will already guarantee loads are
441 		 * seen in-order. See the alpha page table accessors for the
442 		 * smp_rmb() barriers in page table walking code.
443 		 */
444 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
445 		pmd_populate(mm, pmd, *pte);
446 		*pte = NULL;
447 	}
448 	spin_unlock(ptl);
449 }
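
/*
 * A minimal sketch of the publication protocol above (illustrative only):
 *
 *	CPU 0 (pmd_install)		CPU 1 (lockless walker)
 *	-------------------		-----------------------
 *	initialize pte page		pmd = READ_ONCE(*pmdp);
 *	smp_wmb();			if (!pmd_none(pmd))
 *	pmd_populate(mm, pmd, page);		walk the pte page ...
 *
 * The walker's reads of the pte page contents are data-dependent on its
 * read of *pmd, which every architecture except alpha orders for free;
 * alpha supplies the matching smp_rmb() in its page table accessors.
 */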
450 
451 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
452 {
453 	pgtable_t new = pte_alloc_one(mm);
454 	if (!new)
455 		return -ENOMEM;
456 
457 	pmd_install(mm, pmd, &new);
458 	if (new)
459 		pte_free(mm, new);
460 	return 0;
461 }
462 
463 int __pte_alloc_kernel(pmd_t *pmd)
464 {
465 	pte_t *new = pte_alloc_one_kernel(&init_mm);
466 	if (!new)
467 		return -ENOMEM;
468 
469 	spin_lock(&init_mm.page_table_lock);
470 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
471 		smp_wmb(); /* See comment in pmd_install() */
472 		pmd_populate_kernel(&init_mm, pmd, new);
473 		new = NULL;
474 	}
475 	spin_unlock(&init_mm.page_table_lock);
476 	if (new)
477 		pte_free_kernel(&init_mm, new);
478 	return 0;
479 }
480 
481 static inline void init_rss_vec(int *rss)
482 {
483 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
484 }
485 
486 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
487 {
488 	int i;
489 
490 	for (i = 0; i < NR_MM_COUNTERS; i++)
491 		if (rss[i])
492 			add_mm_counter(mm, i, rss[i]);
493 }
494 
495 static bool is_bad_page_map_ratelimited(void)
496 {
497 	static unsigned long resume;
498 	static unsigned long nr_shown;
499 	static unsigned long nr_unshown;
500 
501 	/*
502 	 * Allow a burst of 60 reports, then keep quiet for that minute;
503 	 * or allow a steady drip of one report per second.
504 	 */
505 	if (nr_shown == 60) {
506 		if (time_before(jiffies, resume)) {
507 			nr_unshown++;
508 			return true;
509 		}
510 		if (nr_unshown) {
511 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
512 				 nr_unshown);
513 			nr_unshown = 0;
514 		}
515 		nr_shown = 0;
516 	}
517 	if (nr_shown++ == 0)
518 		resume = jiffies + 60 * HZ;
519 	return false;
520 }
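
/*
 * Example of the policy above (illustrative): the first report arms
 * resume = jiffies + 60 * HZ. A flood of faults prints 60 reports, then
 * only increments nr_unshown until that minute is up, at which point a
 * "messages suppressed" summary is printed and the burst allowance
 * resets. A steady drip of one report per second never accumulates 60
 * reports before resume passes, so it is never throttled.
 */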
521 
522 static void __print_bad_page_map_pgtable(struct mm_struct *mm, unsigned long addr)
523 {
524 	unsigned long long pgdv, p4dv, pudv, pmdv;
525 	p4d_t p4d, *p4dp;
526 	pud_t pud, *pudp;
527 	pmd_t pmd, *pmdp;
528 	pgd_t *pgdp;
529 
530 	/*
531 	 * Although this looks like a fully lockless pgtable walk, it is not:
532 	 * see locking requirements for print_bad_page_map().
533 	 */
534 	pgdp = pgd_offset(mm, addr);
535 	pgdv = pgd_val(*pgdp);
536 
537 	if (!pgd_present(*pgdp) || pgd_leaf(*pgdp)) {
538 		pr_alert("pgd:%08llx\n", pgdv);
539 		return;
540 	}
541 
542 	p4dp = p4d_offset(pgdp, addr);
543 	p4d = p4dp_get(p4dp);
544 	p4dv = p4d_val(p4d);
545 
546 	if (!p4d_present(p4d) || p4d_leaf(p4d)) {
547 		pr_alert("pgd:%08llx p4d:%08llx\n", pgdv, p4dv);
548 		return;
549 	}
550 
551 	pudp = pud_offset(p4dp, addr);
552 	pud = pudp_get(pudp);
553 	pudv = pud_val(pud);
554 
555 	if (!pud_present(pud) || pud_leaf(pud)) {
556 		pr_alert("pgd:%08llx p4d:%08llx pud:%08llx\n", pgdv, p4dv, pudv);
557 		return;
558 	}
559 
560 	pmdp = pmd_offset(pudp, addr);
561 	pmd = pmdp_get(pmdp);
562 	pmdv = pmd_val(pmd);
563 
564 	/*
565 	 * Dumping the PTE would be nice, but it's tricky with CONFIG_HIGHPTE,
566 	 * because the table should already be mapped by the caller and
567 	 * doing another map would be bad. print_bad_page_map() should
568 	 * already take care of printing the PTE.
569 	 */
570 	pr_alert("pgd:%08llx p4d:%08llx pud:%08llx pmd:%08llx\n", pgdv,
571 		 p4dv, pudv, pmdv);
572 }
573 
574 /*
575  * This function is called to print an error when a bad page table entry is
576  * found: for example, a corrupted entry, or a PFN-mapped pte in a region
577  * that doesn't allow it.
578  *
579  * The calling function must still handle the error.
580  *
581  * This function must be called during a proper page table walk, as it will
582  * re-walk the page table to dump information: the caller MUST prevent page
583  * table teardown (by holding mmap, vma or rmap lock) and MUST hold the leaf
584  * page table lock.
585  */
586 static void print_bad_page_map(struct vm_area_struct *vma,
587 		unsigned long addr, unsigned long long entry, struct page *page,
588 		enum pgtable_level level)
589 {
590 	struct address_space *mapping;
591 	pgoff_t index;
592 
593 	if (is_bad_page_map_ratelimited())
594 		return;
595 
596 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
597 	index = linear_page_index(vma, addr);
598 
599 	pr_alert("BUG: Bad page map in process %s  %s:%08llx", current->comm,
600 		 pgtable_level_to_str(level), entry);
601 	__print_bad_page_map_pgtable(vma->vm_mm, addr);
602 	if (page)
603 		dump_page(page, "bad page map");
604 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
605 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
606 	pr_alert("file:%pD fault:%ps mmap:%ps mmap_prepare:%ps read_folio:%ps\n",
607 		 vma->vm_file,
608 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
609 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
610 		 vma->vm_file ? vma->vm_file->f_op->mmap_prepare : NULL,
611 		 mapping ? mapping->a_ops->read_folio : NULL);
612 	dump_stack();
613 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
614 }
615 #define print_bad_pte(vma, addr, pte, page) \
616 	print_bad_page_map(vma, addr, pte_val(pte), page, PGTABLE_LEVEL_PTE)
617 
618 /**
619  * __vm_normal_page() - Get the "struct page" associated with a page table entry.
620  * @vma: The VMA mapping the page table entry.
621  * @addr: The address where the page table entry is mapped.
622  * @pfn: The PFN stored in the page table entry.
623  * @special: Whether the page table entry is marked "special".
624  * @level: The page table level for error reporting purposes only.
625  * @entry: The page table entry value for error reporting purposes only.
626  *
627  * "Special" mappings do not wish to be associated with a "struct page" (either
628  * it doesn't exist, or it exists but they don't want to touch it). In this
629  * case, NULL is returned here. "Normal" mappings do have a struct page and
630  * are ordinarily refcounted.
631  *
632  * Page mappings of the shared zero folios are always considered "special", as
633  * they are not ordinarily refcounted: neither the refcount nor the mapcount
634  * of these folios is adjusted when mapping them into user page tables.
635  * Selected page table walkers (such as GUP) can still identify mappings of the
636  * shared zero folios and work with the underlying "struct page".
637  *
638  * There are 2 broad cases. Firstly, an architecture may define a "special"
639  * page table entry bit, such as pte_special(), in which case this function is
640  * trivial. Secondly, an architecture may not have a spare page table
641  * entry bit, which requires a more complicated scheme, described below.
642  *
643  * With CONFIG_FIND_NORMAL_PAGE, we might have the "special" bit set on
644  * page table entries that actually map "normal" pages: however, that page
645  * cannot be looked up through the PFN stored in the page table entry, but
646  * instead will be looked up through vm_ops->find_normal_page(). So far, this
647  * only applies to PTEs.
648  *
649  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
650  * special mapping (even if there are underlying and valid "struct pages").
651  * COWed pages of a VM_PFNMAP are always normal.
652  *
653  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
654  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
655  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
656  * mapping will always honor the rule
657  *
658  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
659  *
660  * And for normal mappings this is false.
661  *
662  * This restricts such mappings to be a linear translation from virtual address
663  * to pfn. To get around this restriction, we allow arbitrary mappings so long
664  * as the vma is not a COW mapping; in that case, we know that all ptes are
665  * special (because none can have been COWed).
666  *
667  *
668  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
669  *
670  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
671  * page" backing, however the difference is that _all_ pages with a struct
672  * page (that is, those where pfn_valid is true, except the shared zero
673  * folios) are refcounted and considered normal pages by the VM.
674  *
675  * The disadvantage is that pages are refcounted (which can be slower and
676  * simply not an option for some PFNMAP users). The advantage is that we
677  * don't have to follow the strict linearity rule of PFNMAP mappings in
678  * order to support COWable mappings.
679  *
680  * Return: Returns the "struct page" if this is a "normal" mapping. Returns
681  *	   NULL if this is a "special" mapping.
682  */
683 static inline struct page *__vm_normal_page(struct vm_area_struct *vma,
684 		unsigned long addr, unsigned long pfn, bool special,
685 		unsigned long long entry, enum pgtable_level level)
686 {
687 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
688 		if (unlikely(special)) {
689 #ifdef CONFIG_FIND_NORMAL_PAGE
690 			if (vma->vm_ops && vma->vm_ops->find_normal_page)
691 				return vma->vm_ops->find_normal_page(vma, addr);
692 #endif /* CONFIG_FIND_NORMAL_PAGE */
693 			if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
694 				return NULL;
695 			if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
696 				return NULL;
697 
698 			print_bad_page_map(vma, addr, entry, NULL, level);
699 			return NULL;
700 		}
701 		/*
702 		 * With CONFIG_ARCH_HAS_PTE_SPECIAL, any special page table
703 		 * mappings (incl. shared zero folios) are marked accordingly.
704 		 */
705 	} else {
706 		if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) {
707 			if (vma->vm_flags & VM_MIXEDMAP) {
708 				/* If it has a "struct page", it's "normal". */
709 				if (!pfn_valid(pfn))
710 					return NULL;
711 			} else {
712 				unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
713 
714 				/* Only CoW'ed anon folios are "normal". */
715 				if (pfn == vma->vm_pgoff + off)
716 					return NULL;
717 				if (!is_cow_mapping(vma->vm_flags))
718 					return NULL;
719 			}
720 		}
721 
722 		if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
723 			return NULL;
724 	}
725 
726 	if (unlikely(pfn > highest_memmap_pfn)) {
727 		/* Corrupted page table entry. */
728 		print_bad_page_map(vma, addr, entry, NULL, level);
729 		return NULL;
730 	}
731 	/*
732 	 * NOTE! We still have PageReserved() pages in the page tables.
733 	 * For example, VDSO mappings can cause them to exist.
734 	 */
735 	VM_WARN_ON_ONCE(is_zero_pfn(pfn) || is_huge_zero_pfn(pfn));
736 	return pfn_to_page(pfn);
737 }
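
#if 0	/* Illustrative sketch, not built. */
/*
 * A hypothetical stand-alone rendering of the VM_PFNMAP linearity rule
 * documented above (this helper is not an API of this file).
 * remap_pfn_range() stores the first mapped PFN in vm_pgoff, so a pte
 * that still matches the linear translation is the raw "special"
 * mapping, while a mismatch in a COW mapping can only be a CoW'ed,
 * hence "normal", anon page.
 */
static bool pfnmap_pte_is_raw(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long pfn)
{
	unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

	return pfn == vma->vm_pgoff + off;
}
#endif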
738 
739 /**
740  * vm_normal_page() - Get the "struct page" associated with a PTE
741  * @vma: The VMA mapping the @pte.
742  * @addr: The address where the @pte is mapped.
743  * @pte: The PTE.
744  *
745  * Get the "struct page" associated with a PTE. See __vm_normal_page()
746  * for details on "normal" and "special" mappings.
747  *
748  * Return: Returns the "struct page" if this is a "normal" mapping. Returns
749  *	   NULL if this is a "special" mapping.
750  */
751 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
752 			    pte_t pte)
753 {
754 	return __vm_normal_page(vma, addr, pte_pfn(pte), pte_special(pte),
755 				pte_val(pte), PGTABLE_LEVEL_PTE);
756 }
757 
758 /**
759  * vm_normal_folio() - Get the "struct folio" associated with a PTE
760  * @vma: The VMA mapping the @pte.
761  * @addr: The address where the @pte is mapped.
762  * @pte: The PTE.
763  *
764  * Get the "struct folio" associated with a PTE. See __vm_normal_page()
765  * for details on "normal" and "special" mappings.
766  *
767  * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
768  *	   NULL if this is a "special" mapping.
769  */
770 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
771 			    pte_t pte)
772 {
773 	struct page *page = vm_normal_page(vma, addr, pte);
774 
775 	if (page)
776 		return page_folio(page);
777 	return NULL;
778 }
779 
780 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
781 /**
782  * vm_normal_page_pmd() - Get the "struct page" associated with a PMD
783  * @vma: The VMA mapping the @pmd.
784  * @addr: The address where the @pmd is mapped.
785  * @pmd: The PMD.
786  *
787  * Get the "struct page" associated with a PMD. See __vm_normal_page()
788  * for details on "normal" and "special" mappings.
789  *
790  * Return: Returns the "struct page" if this is a "normal" mapping. Returns
791  *	   NULL if this is a "special" mapping.
792  */
793 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
794 				pmd_t pmd)
795 {
796 	return __vm_normal_page(vma, addr, pmd_pfn(pmd), pmd_special(pmd),
797 				pmd_val(pmd), PGTABLE_LEVEL_PMD);
798 }
799 
800 /**
801  * vm_normal_folio_pmd() - Get the "struct folio" associated with a PMD
802  * @vma: The VMA mapping the @pmd.
803  * @addr: The address where the @pmd is mapped.
804  * @pmd: The PMD.
805  *
806  * Get the "struct folio" associated with a PMD. See __vm_normal_page()
807  * for details on "normal" and "special" mappings.
808  *
809  * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
810  *	   NULL if this is a "special" mapping.
811  */
812 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
813 				  unsigned long addr, pmd_t pmd)
814 {
815 	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
816 
817 	if (page)
818 		return page_folio(page);
819 	return NULL;
820 }
821 
822 /**
823  * vm_normal_page_pud() - Get the "struct page" associated with a PUD
824  * @vma: The VMA mapping the @pud.
825  * @addr: The address where the @pud is mapped.
826  * @pud: The PUD.
827  *
828  * Get the "struct page" associated with a PUD. See __vm_normal_page()
829  * for details on "normal" and "special" mappings.
830  *
831  * Return: Returns the "struct page" if this is a "normal" mapping. Returns
832  *	   NULL if this is a "special" mapping.
833  */
834 struct page *vm_normal_page_pud(struct vm_area_struct *vma,
835 		unsigned long addr, pud_t pud)
836 {
837 	return __vm_normal_page(vma, addr, pud_pfn(pud), pud_special(pud),
838 				pud_val(pud), PGTABLE_LEVEL_PUD);
839 }
840 #endif
841 
842 /**
843  * restore_exclusive_pte - Restore a device-exclusive entry
844  * @vma: VMA covering @address
845  * @folio: the mapped folio
846  * @page: the mapped folio page
847  * @address: the virtual address
848  * @ptep: pte pointer into the locked page table mapping the folio page
849  * @orig_pte: pte value at @ptep
850  *
851  * Restore a device-exclusive non-swap entry to an ordinary present pte.
852  *
853  * The folio and the page table must be locked, and MMU notifiers must have
854  * been called to invalidate any (exclusive) device mappings.
855  *
856  * Locking the folio makes sure that anybody who just converted the pte to
857  * a device-exclusive entry can map it into the device to make forward
858  * progress without others converting it back until the folio is unlocked.
859  *
860  * If the folio lock ever becomes an issue, we can stop relying on the folio
861  * lock; it might make some scenarios with heavy thrashing less likely to
862  * make forward progress, but these scenarios might not be valid use cases.
863  *
864  * Note that the folio lock does not protect against all cases of concurrent
865  * page table modifications (e.g., MADV_DONTNEED, mprotect), so device drivers
866  * must use MMU notifiers to sync against any concurrent changes.
867  */
868 static void restore_exclusive_pte(struct vm_area_struct *vma,
869 		struct folio *folio, struct page *page, unsigned long address,
870 		pte_t *ptep, pte_t orig_pte)
871 {
872 	pte_t pte;
873 
874 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
875 
876 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
877 	if (pte_swp_soft_dirty(orig_pte))
878 		pte = pte_mksoft_dirty(pte);
879 
880 	if (pte_swp_uffd_wp(orig_pte))
881 		pte = pte_mkuffd_wp(pte);
882 
883 	if ((vma->vm_flags & VM_WRITE) &&
884 	    can_change_pte_writable(vma, address, pte)) {
885 		if (folio_test_dirty(folio))
886 			pte = pte_mkdirty(pte);
887 		pte = pte_mkwrite(pte, vma);
888 	}
889 	set_pte_at(vma->vm_mm, address, ptep, pte);
890 
891 	/*
892 	 * No need to invalidate - it was non-present before. However
893 	 * secondary CPUs may have mappings that need invalidating.
894 	 */
895 	update_mmu_cache(vma, address, ptep);
896 }
897 
898 /*
899  * Tries to restore an exclusive pte if the page lock can be acquired without
900  * sleeping.
901  */
902 static int try_restore_exclusive_pte(struct vm_area_struct *vma,
903 		unsigned long addr, pte_t *ptep, pte_t orig_pte)
904 {
905 	const softleaf_t entry = softleaf_from_pte(orig_pte);
906 	struct page *page = softleaf_to_page(entry);
907 	struct folio *folio = page_folio(page);
908 
909 	if (folio_trylock(folio)) {
910 		restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte);
911 		folio_unlock(folio);
912 		return 0;
913 	}
914 
915 	return -EBUSY;
916 }
917 
918 /*
919  * Copy one vm_area from one task to the other. Assumes that the page tables
920  * already present in the new task are cleared in the whole range
921  * covered by this vma.
922  */
923 
924 static unsigned long
925 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
926 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
927 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
928 {
929 	vm_flags_t vm_flags = dst_vma->vm_flags;
930 	pte_t orig_pte = ptep_get(src_pte);
931 	softleaf_t entry = softleaf_from_pte(orig_pte);
932 	pte_t pte = orig_pte;
933 	struct folio *folio;
934 	struct page *page;
935 
936 	if (likely(softleaf_is_swap(entry))) {
937 		if (swap_dup_entry_direct(entry) < 0)
938 			return -EIO;
939 
940 		/* make sure dst_mm is on swapoff's mmlist. */
941 		if (unlikely(list_empty(&dst_mm->mmlist))) {
942 			spin_lock(&mmlist_lock);
943 			if (list_empty(&dst_mm->mmlist))
944 				list_add(&dst_mm->mmlist,
945 						&src_mm->mmlist);
946 			spin_unlock(&mmlist_lock);
947 		}
948 		/* Mark the swap entry as shared. */
949 		if (pte_swp_exclusive(orig_pte)) {
950 			pte = pte_swp_clear_exclusive(orig_pte);
951 			set_pte_at(src_mm, addr, src_pte, pte);
952 		}
953 		rss[MM_SWAPENTS]++;
954 	} else if (softleaf_is_migration(entry)) {
955 		folio = softleaf_to_folio(entry);
956 
957 		rss[mm_counter(folio)]++;
958 
959 		if (!softleaf_is_migration_read(entry) &&
960 				is_cow_mapping(vm_flags)) {
961 			/*
962 			 * COW mappings require pages in both parent and child
963 			 * to be set to read-only. A previously exclusive entry is
964 			 * now shared.
965 			 */
966 			entry = make_readable_migration_entry(
967 							swp_offset(entry));
968 			pte = softleaf_to_pte(entry);
969 			if (pte_swp_soft_dirty(orig_pte))
970 				pte = pte_swp_mksoft_dirty(pte);
971 			if (pte_swp_uffd_wp(orig_pte))
972 				pte = pte_swp_mkuffd_wp(pte);
973 			set_pte_at(src_mm, addr, src_pte, pte);
974 		}
975 	} else if (softleaf_is_device_private(entry)) {
976 		page = softleaf_to_page(entry);
977 		folio = page_folio(page);
978 
979 		/*
980 		 * Update rss count even for unaddressable pages, as
981 		 * they should be treated just like normal pages in this
982 		 * respect.
983 		 *
984 		 * We will likely want to have some new rss counters
985 		 * for unaddressable pages, at some point. But for now
986 		 * keep things as they are.
987 		 */
988 		folio_get(folio);
989 		rss[mm_counter(folio)]++;
990 		/* Cannot fail as these pages cannot get pinned. */
991 		folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);
992 
993 		/*
994 		 * We do not preserve soft-dirty information, because so
995 		 * far, checkpoint/restore is the only feature that
996 		 * requires that. And checkpoint/restore does not work
997 		 * when a device driver is involved (you cannot easily
998 		 * save and restore device driver state).
999 		 */
1000 		if (softleaf_is_device_private_write(entry) &&
1001 		    is_cow_mapping(vm_flags)) {
1002 			entry = make_readable_device_private_entry(
1003 							swp_offset(entry));
1004 			pte = swp_entry_to_pte(entry);
1005 			if (pte_swp_uffd_wp(orig_pte))
1006 				pte = pte_swp_mkuffd_wp(pte);
1007 			set_pte_at(src_mm, addr, src_pte, pte);
1008 		}
1009 	} else if (softleaf_is_device_exclusive(entry)) {
1010 		/*
1011 		 * Make device exclusive entries present by restoring the
1012 		 * original entry then copying as for a present pte. Device
1013 		 * exclusive entries currently only support private writable
1014 		 * (ie. COW) mappings.
1015 		 */
1016 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
1017 		if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
1018 			return -EBUSY;
1019 		return -ENOENT;
1020 	} else if (softleaf_is_marker(entry)) {
1021 		pte_marker marker = copy_pte_marker(entry, dst_vma);
1022 
1023 		if (marker)
1024 			set_pte_at(dst_mm, addr, dst_pte,
1025 				   make_pte_marker(marker));
1026 		return 0;
1027 	}
1028 	if (!userfaultfd_wp(dst_vma))
1029 		pte = pte_swp_clear_uffd_wp(pte);
1030 	set_pte_at(dst_mm, addr, dst_pte, pte);
1031 	return 0;
1032 }
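
/*
 * Summary of copy_nonpresent_pte()'s return contract, as consumed by
 * copy_pte_range() below (a restatement of the code, for illustration):
 *
 *	0	entry (or marker) copied; keep scanning
 *	-EIO	duplicating the swap entry needs memory; the caller drops
 *		the locks, calls swap_retry_table_alloc(), and retries
 *	-EBUSY	the device-exclusive folio lock could not be taken; the
 *		caller backs off and retries
 *	-ENOENT	a device-exclusive entry was restored to a present pte;
 *		the caller falls through to copy_present_ptes()
 */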
1033 
1034 /*
1035  * Copy a present and normal page.
1036  *
1037  * NOTE! The usual case is that this isn't required;
1038  * instead, the caller can just increase the page refcount
1039  * and re-use the pte the traditional way.
1040  *
1041  * And if we need a pre-allocated page but don't yet have
1042  * one, return a negative error to let the preallocation
1043  * code know so that it can do so outside the page table
1044  * lock.
1045  */
1046 static inline int
1047 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1048 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
1049 		  struct folio **prealloc, struct page *page)
1050 {
1051 	struct folio *new_folio;
1052 	pte_t pte;
1053 
1054 	new_folio = *prealloc;
1055 	if (!new_folio)
1056 		return -EAGAIN;
1057 
1058 	/*
1059 	 * We have a prealloc page, all good!  Take it
1060 	 * over and copy the page & arm it.
1061 	 */
1062 
1063 	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
1064 		return -EHWPOISON;
1065 
1066 	*prealloc = NULL;
1067 	__folio_mark_uptodate(new_folio);
1068 	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
1069 	folio_add_lru_vma(new_folio, dst_vma);
1070 	rss[MM_ANONPAGES]++;
1071 
1072 	/* All done, just insert the new page copy in the child */
1073 	pte = folio_mk_pte(new_folio, dst_vma->vm_page_prot);
1074 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
1075 	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
1076 		/* Uffd-wp needs to be delivered to dest pte as well */
1077 		pte = pte_mkuffd_wp(pte);
1078 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
1079 	return 0;
1080 }
1081 
1082 static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
1083 		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
1084 		pte_t pte, unsigned long addr, int nr)
1085 {
1086 	struct mm_struct *src_mm = src_vma->vm_mm;
1087 
1088 	/* If it's a COW mapping, write-protect it in both processes. */
1089 	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
1090 		wrprotect_ptes(src_mm, addr, src_pte, nr);
1091 		pte = pte_wrprotect(pte);
1092 	}
1093 
1094 	/* If it's a shared mapping, mark it clean in the child. */
1095 	if (src_vma->vm_flags & VM_SHARED)
1096 		pte = pte_mkclean(pte);
1097 	pte = pte_mkold(pte);
1098 
1099 	if (!userfaultfd_wp(dst_vma))
1100 		pte = pte_clear_uffd_wp(pte);
1101 
1102 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
1103 }
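
/*
 * E.g. (illustrative): for a MAP_PRIVATE anonymous region, a writable pte
 * is write-protected in *both* mms above, so whichever process writes
 * first takes the copy-on-write fault; for a MAP_SHARED region the
 * child's pte is merely marked clean and old, so the hardware re-reports
 * dirtying and referencing in the child.
 */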
1104 
1105 /*
1106  * Copy one present PTE, trying to batch-process subsequent PTEs that map
1107  * consecutive pages of the same folio by copying them as well.
1108  *
1109  * Returns -EAGAIN if a preallocated page is required to copy the next PTE.
1110  * Otherwise, returns the number of copied PTEs (at least 1).
1111  */
1112 static inline int
1113 copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1114 		 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
1115 		 int max_nr, int *rss, struct folio **prealloc)
1116 {
1117 	fpb_t flags = FPB_MERGE_WRITE;
1118 	struct page *page;
1119 	struct folio *folio;
1120 	int err, nr;
1121 
1122 	page = vm_normal_page(src_vma, addr, pte);
1123 	if (unlikely(!page))
1124 		goto copy_pte;
1125 
1126 	folio = page_folio(page);
1127 
1128 	/*
1129 	 * If we likely have to copy, just don't bother with batching. Make
1130 	 * sure that the common "small folio" case is as fast as possible
1131 	 * by keeping the batching logic separate.
1132 	 */
1133 	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
1134 		if (!(src_vma->vm_flags & VM_SHARED))
1135 			flags |= FPB_RESPECT_DIRTY;
1136 		if (vma_soft_dirty_enabled(src_vma))
1137 			flags |= FPB_RESPECT_SOFT_DIRTY;
1138 
1139 		nr = folio_pte_batch_flags(folio, src_vma, src_pte, &pte, max_nr, flags);
1140 		folio_ref_add(folio, nr);
1141 		if (folio_test_anon(folio)) {
1142 			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
1143 								  nr, dst_vma, src_vma))) {
1144 				folio_ref_sub(folio, nr);
1145 				return -EAGAIN;
1146 			}
1147 			rss[MM_ANONPAGES] += nr;
1148 			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1149 		} else {
1150 			folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
1151 			rss[mm_counter_file(folio)] += nr;
1152 		}
1153 		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
1154 				    addr, nr);
1155 		return nr;
1156 	}
1157 
1158 	folio_get(folio);
1159 	if (folio_test_anon(folio)) {
1160 		/*
1161 		 * If this page may have been pinned by the parent process,
1162 		 * copy the page immediately for the child so that we'll always
1163 		 * guarantee the pinned page won't be randomly replaced in the
1164 		 * future.
1165 		 */
1166 		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) {
1167 			/* Page may be pinned, we have to copy. */
1168 			folio_put(folio);
1169 			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
1170 						addr, rss, prealloc, page);
1171 			return err ? err : 1;
1172 		}
1173 		rss[MM_ANONPAGES]++;
1174 		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1175 	} else {
1176 		folio_dup_file_rmap_pte(folio, page, dst_vma);
1177 		rss[mm_counter_file(folio)]++;
1178 	}
1179 
1180 copy_pte:
1181 	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
1182 	return 1;
1183 }
1184 
1185 static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
1186 		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
1187 {
1188 	struct folio *new_folio;
1189 
1190 	if (need_zero)
1191 		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
1192 	else
1193 		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
1194 
1195 	if (!new_folio)
1196 		return NULL;
1197 
1198 	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
1199 		folio_put(new_folio);
1200 		return NULL;
1201 	}
1202 	folio_throttle_swaprate(new_folio, GFP_KERNEL);
1203 
1204 	return new_folio;
1205 }
1206 
1207 static int
1208 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1209 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1210 	       unsigned long end)
1211 {
1212 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1213 	struct mm_struct *src_mm = src_vma->vm_mm;
1214 	pte_t *orig_src_pte, *orig_dst_pte;
1215 	pte_t *src_pte, *dst_pte;
1216 	pmd_t dummy_pmdval;
1217 	pte_t ptent;
1218 	spinlock_t *src_ptl, *dst_ptl;
1219 	int progress, max_nr, ret = 0;
1220 	int rss[NR_MM_COUNTERS];
1221 	softleaf_t entry = softleaf_mk_none();
1222 	struct folio *prealloc = NULL;
1223 	int nr;
1224 
1225 again:
1226 	progress = 0;
1227 	init_rss_vec(rss);
1228 
1229 	/*
1230 	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
1231 	 * error handling here, assume that exclusive mmap_lock on dst and src
1232 	 * protects anon from unexpected THP transitions; with shmem and file
1233 	 * protected by mmap_lock-less collapse skipping areas with anon_vma
1234 	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
1235 	 * can remove such assumptions later, but this is good enough for now.
1236 	 */
1237 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1238 	if (!dst_pte) {
1239 		ret = -ENOMEM;
1240 		goto out;
1241 	}
1242 
1243 	/*
1244 	 * We already hold the exclusive mmap_lock, and copy_pte_range() and
1245 	 * retract_page_tables() use vma->anon_vma for exclusion, so the PTE
1246 	 * page is stable and there is no need to fetch pmdval and do a
1247 	 * pmd_same() check.
1248 	 */
1249 	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
1250 					   &src_ptl);
1251 	if (!src_pte) {
1252 		pte_unmap_unlock(dst_pte, dst_ptl);
1253 		/* ret == 0 */
1254 		goto out;
1255 	}
1256 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1257 	orig_src_pte = src_pte;
1258 	orig_dst_pte = dst_pte;
1259 	lazy_mmu_mode_enable();
1260 
1261 	do {
1262 		nr = 1;
1263 
1264 		/*
1265 		 * We are holding two locks at this point - either of them
1266 		 * could generate latencies in another task on another CPU.
1267 		 */
1268 		if (progress >= 32) {
1269 			progress = 0;
1270 			if (need_resched() ||
1271 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1272 				break;
1273 		}
1274 		ptent = ptep_get(src_pte);
1275 		if (pte_none(ptent)) {
1276 			progress++;
1277 			continue;
1278 		}
1279 		if (unlikely(!pte_present(ptent))) {
1280 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1281 						  dst_pte, src_pte,
1282 						  dst_vma, src_vma,
1283 						  addr, rss);
1284 			if (ret == -EIO) {
1285 				entry = softleaf_from_pte(ptep_get(src_pte));
1286 				break;
1287 			} else if (ret == -EBUSY) {
1288 				break;
1289 			} else if (!ret) {
1290 				progress += 8;
1291 				continue;
1292 			}
1293 			ptent = ptep_get(src_pte);
1294 			VM_WARN_ON_ONCE(!pte_present(ptent));
1295 
1296 			/*
1297 			 * Device exclusive entry restored, continue by copying
1298 			 * the now present pte.
1299 			 */
1300 			WARN_ON_ONCE(ret != -ENOENT);
1301 		}
1302 		/* copy_present_ptes() will clear `*prealloc' if consumed */
1303 		max_nr = (end - addr) / PAGE_SIZE;
1304 		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
1305 					ptent, addr, max_nr, rss, &prealloc);
1306 		/*
1307 		 * If we need a pre-allocated page for this pte, drop the
1308 		 * locks, allocate, and try again.
1309 		 * If copy failed due to hwpoison in source page, break out.
1310 		 */
1311 		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
1312 			break;
1313 		if (unlikely(prealloc)) {
1314 			/*
1315 			 * The preallocated folio cannot be reused for the next
1316 			 * address, so as to strictly follow the mempolicy (e.g.,
1317 			 * alloc_page_vma() allocates according to the address).
1318 			 * This can only happen if a pinned pte changed.
1319 			 */
1320 			folio_put(prealloc);
1321 			prealloc = NULL;
1322 		}
1323 		nr = ret;
1324 		progress += 8 * nr;
1325 	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
1326 		 addr != end);
1327 
1328 	lazy_mmu_mode_disable();
1329 	pte_unmap_unlock(orig_src_pte, src_ptl);
1330 	add_mm_rss_vec(dst_mm, rss);
1331 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1332 	cond_resched();
1333 
1334 	if (ret == -EIO) {
1335 		VM_WARN_ON_ONCE(!entry.val);
1336 		if (swap_retry_table_alloc(entry, GFP_KERNEL) < 0) {
1337 			ret = -ENOMEM;
1338 			goto out;
1339 		}
1340 		entry.val = 0;
1341 	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
1342 		goto out;
1343 	} else if (ret == -EAGAIN) {
1344 		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
1345 		if (!prealloc)
1346 			return -ENOMEM;
1347 	} else if (ret < 0) {
1348 		VM_WARN_ON_ONCE(1);
1349 	}
1350 
1351 	/* We've captured and resolved the error. Reset, try again. */
1352 	ret = 0;
1353 
1354 	if (addr != end)
1355 		goto again;
1356 out:
1357 	if (unlikely(prealloc))
1358 		folio_put(prealloc);
1359 	return ret;
1360 }
1361 
1362 static inline int
1363 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1364 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1365 	       unsigned long end)
1366 {
1367 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1368 	struct mm_struct *src_mm = src_vma->vm_mm;
1369 	pmd_t *src_pmd, *dst_pmd;
1370 	unsigned long next;
1371 
1372 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1373 	if (!dst_pmd)
1374 		return -ENOMEM;
1375 	src_pmd = pmd_offset(src_pud, addr);
1376 	do {
1377 		next = pmd_addr_end(addr, end);
1378 		if (pmd_is_huge(*src_pmd)) {
1379 			int err;
1380 
1381 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1382 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1383 					    addr, dst_vma, src_vma);
1384 			if (err == -ENOMEM)
1385 				return -ENOMEM;
1386 			if (!err)
1387 				continue;
1388 			/* fall through */
1389 		}
1390 		if (pmd_none_or_clear_bad(src_pmd))
1391 			continue;
1392 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1393 				   addr, next))
1394 			return -ENOMEM;
1395 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1396 	return 0;
1397 }
1398 
1399 static inline int
1400 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1401 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1402 	       unsigned long end)
1403 {
1404 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1405 	struct mm_struct *src_mm = src_vma->vm_mm;
1406 	pud_t *src_pud, *dst_pud;
1407 	unsigned long next;
1408 
1409 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1410 	if (!dst_pud)
1411 		return -ENOMEM;
1412 	src_pud = pud_offset(src_p4d, addr);
1413 	do {
1414 		next = pud_addr_end(addr, end);
1415 		if (pud_trans_huge(*src_pud)) {
1416 			int err;
1417 
1418 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1419 			err = copy_huge_pud(dst_mm, src_mm,
1420 					    dst_pud, src_pud, addr, src_vma);
1421 			if (err == -ENOMEM)
1422 				return -ENOMEM;
1423 			if (!err)
1424 				continue;
1425 			/* fall through */
1426 		}
1427 		if (pud_none_or_clear_bad(src_pud))
1428 			continue;
1429 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1430 				   addr, next))
1431 			return -ENOMEM;
1432 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1433 	return 0;
1434 }
1435 
1436 static inline int
1437 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1438 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1439 	       unsigned long end)
1440 {
1441 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1442 	p4d_t *src_p4d, *dst_p4d;
1443 	unsigned long next;
1444 
1445 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1446 	if (!dst_p4d)
1447 		return -ENOMEM;
1448 	src_p4d = p4d_offset(src_pgd, addr);
1449 	do {
1450 		next = p4d_addr_end(addr, end);
1451 		if (p4d_none_or_clear_bad(src_p4d))
1452 			continue;
1453 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1454 				   addr, next))
1455 			return -ENOMEM;
1456 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1457 	return 0;
1458 }
1459 
1460 /*
1461  * Return true if the vma needs to copy the pgtable during this fork().  Return
1462  * false when we can speed up fork() by letting page faults fill the page
1463  * tables lazily when the child first accesses the memory range.
1464  */
1465 static bool
1466 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1467 {
1468 	/*
1469 	 * We check against dst_vma because, while sane VMA flags will have been
1470 	 * copied, VM_UFFD_WP may be set only on dst_vma.
1471 	 */
1472 	if (dst_vma->vm_flags & VM_COPY_ON_FORK)
1473 		return true;
1474 	/*
1475 	 * The presence of an anon_vma indicates an anonymous VMA has page
1476 	 * tables which naturally cannot be reconstituted on page fault.
1477 	 */
1478 	if (src_vma->anon_vma)
1479 		return true;
1480 
1481 	/*
1482 	 * Don't copy ptes where a page fault will fill them correctly.  Fork
1483 	 * becomes much lighter when there are big shared or private readonly
1484 	 * mappings. The tradeoff is that copy_page_range is more efficient
1485 	 * than faulting.
1486 	 */
1487 	return false;
1488 }
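
/*
 * Illustrative examples (not exhaustive): a private anonymous vma that has
 * faulted pages in (and so has an anon_vma) must be copied; a big read-only
 * file mapping with no anon_vma is skipped, since the child can refault its
 * page tables from the page cache; and vmas with flags in VM_COPY_ON_FORK
 * (e.g. uffd-wp registered ranges, whose write-protect state lives in the
 * page tables themselves) must always be copied.
 */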
1489 
1490 int
1491 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1492 {
1493 	pgd_t *src_pgd, *dst_pgd;
1494 	unsigned long addr = src_vma->vm_start;
1495 	unsigned long end = src_vma->vm_end;
1496 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1497 	struct mm_struct *src_mm = src_vma->vm_mm;
1498 	struct mmu_notifier_range range;
1499 	unsigned long next;
1500 	bool is_cow;
1501 	int ret;
1502 
1503 	if (!vma_needs_copy(dst_vma, src_vma))
1504 		return 0;
1505 
1506 	if (is_vm_hugetlb_page(src_vma))
1507 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1508 
1509 	/*
1510 	 * We need to invalidate the secondary MMU mappings only when
1511 	 * there could be a permission downgrade on the ptes of the
1512 	 * parent mm. And a permission downgrade will only happen if
1513 	 * is_cow_mapping() returns true.
1514 	 */
1515 	is_cow = is_cow_mapping(src_vma->vm_flags);
1516 
1517 	if (is_cow) {
1518 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1519 					0, src_mm, addr, end);
1520 		mmu_notifier_invalidate_range_start(&range);
1521 		/*
1522 		 * Disabling preemption is not needed for the write side, as
1523 		 * the read side doesn't spin, but goes to the mmap_lock.
1524 		 *
1525 		 * Use the raw variant of the seqcount_t write API to avoid
1526 		 * lockdep complaining about preemptibility.
1527 		 */
1528 		vma_assert_write_locked(src_vma);
1529 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1530 	}
1531 
1532 	ret = 0;
1533 	dst_pgd = pgd_offset(dst_mm, addr);
1534 	src_pgd = pgd_offset(src_mm, addr);
1535 	do {
1536 		next = pgd_addr_end(addr, end);
1537 		if (pgd_none_or_clear_bad(src_pgd))
1538 			continue;
1539 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1540 					    addr, next))) {
1541 			ret = -ENOMEM;
1542 			break;
1543 		}
1544 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1545 
1546 	if (is_cow) {
1547 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1548 		mmu_notifier_invalidate_range_end(&range);
1549 	}
1550 	return ret;
1551 }
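
/*
 * The CoW bracketing in copy_page_range(), in schematic form (an
 * illustrative restatement of the code above):
 *
 *	mmu_notifier_invalidate_range_start()	tell secondary MMUs their
 *						mappings may lose write
 *	raw_write_seqcount_begin()		make the GUP-fast read side
 *						back off to the mmap_lock
 *	... write-protect parent and child ptes ...
 *	raw_write_seqcount_end()
 *	mmu_notifier_invalidate_range_end()
 */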
1552 
1553 /* Whether we should zap all COWed (private) pages too */
1554 static inline bool should_zap_cows(struct zap_details *details)
1555 {
1556 	/* By default, zap all pages */
1557 	if (!details)
1558 		return true;
1559 
1560 	VM_WARN_ON_ONCE(details->skip_cows && details->reclaim_pt);
1561 
1562 	/* Or, we zap COWed pages only if the caller wants to */
1563 	return !details->skip_cows;
1564 }
1565 
1566 /* Decide whether we should zap this folio, now that its folio pointer is known */
1567 static inline bool should_zap_folio(struct zap_details *details,
1568 				    struct folio *folio)
1569 {
1570 	/* If we can make a decision without *folio... */
1571 	if (should_zap_cows(details))
1572 		return true;
1573 
1574 	/* Otherwise we should only zap non-anon folios */
1575 	return !folio_test_anon(folio);
1576 }
1577 
1578 static inline bool zap_drop_markers(struct zap_details *details)
1579 {
1580 	if (!details)
1581 		return false;
1582 
1583 	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1584 }
1585 
1586 /*
1587  * This function makes sure that we'll replace the none pte with an uffd-wp
1588  * swap special pte marker when necessary. Must be called with the pgtable lock held.
1589  *
1590  * Returns true if uffd-wp ptes were installed, false otherwise.
1591  */
1592 static inline bool
1593 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1594 			      unsigned long addr, pte_t *pte, int nr,
1595 			      struct zap_details *details, pte_t pteval)
1596 {
1597 	bool was_installed = false;
1598 
1599 	if (!uffd_supports_wp_marker())
1600 		return false;
1601 
1602 	/* Zap on anonymous always means dropping everything */
1603 	if (vma_is_anonymous(vma))
1604 		return false;
1605 
1606 	if (zap_drop_markers(details))
1607 		return false;
1608 
1609 	for (;;) {
1610 		/* the PFN in the PTE is irrelevant. */
1611 		if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
1612 			was_installed = true;
1613 		if (--nr == 0)
1614 			break;
1615 		pte++;
1616 		addr += PAGE_SIZE;
1617 	}
1618 
1619 	return was_installed;
1620 }
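
/*
 * Example (illustrative): MADV_DONTNEED on a shmem range that is uffd-wp
 * registered must not silently drop the write-protect state, so the
 * zapped ptes are replaced with uffd-wp markers above; zapping an
 * anonymous range, by contrast, legitimately drops everything.
 */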
1621 
1622 static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
1623 		struct vm_area_struct *vma, struct folio *folio,
1624 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
1625 		unsigned long addr, struct zap_details *details, int *rss,
1626 		bool *force_flush, bool *force_break, bool *any_skipped)
1627 {
1628 	struct mm_struct *mm = tlb->mm;
1629 	bool delay_rmap = false;
1630 
1631 	if (!folio_test_anon(folio)) {
1632 		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1633 		if (pte_dirty(ptent)) {
1634 			folio_mark_dirty(folio);
1635 			if (tlb_delay_rmap(tlb)) {
1636 				delay_rmap = true;
1637 				*force_flush = true;
1638 			}
1639 		}
1640 		if (pte_young(ptent) && likely(vma_has_recency(vma)))
1641 			folio_mark_accessed(folio);
1642 		rss[mm_counter(folio)] -= nr;
1643 	} else {
1644 		/* We don't need up-to-date accessed/dirty bits. */
1645 		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1646 		rss[MM_ANONPAGES] -= nr;
1647 	}
1648 	/* Checking a single PTE in a batch is sufficient. */
1649 	arch_check_zapped_pte(vma, ptent);
1650 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
1651 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
1652 		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
1653 							     nr, details, ptent);
1654 
1655 	if (!delay_rmap) {
1656 		folio_remove_rmap_ptes(folio, page, nr, vma);
1657 
1658 		if (unlikely(folio_mapcount(folio) < 0))
1659 			print_bad_pte(vma, addr, ptent, page);
1660 	}
1661 	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
1662 		*force_flush = true;
1663 		*force_break = true;
1664 	}
1665 }
1666 
1667 /*
1668  * Zap or skip at least one present PTE, trying to batch-process subsequent
1669  * PTEs that map consecutive pages of the same folio.
1670  *
1671  * Returns the number of processed (skipped or zapped) PTEs (at least 1).
1672  */
1673 static inline int zap_present_ptes(struct mmu_gather *tlb,
1674 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1675 		unsigned int max_nr, unsigned long addr,
1676 		struct zap_details *details, int *rss, bool *force_flush,
1677 		bool *force_break, bool *any_skipped)
1678 {
1679 	struct mm_struct *mm = tlb->mm;
1680 	struct folio *folio;
1681 	struct page *page;
1682 	int nr;
1683 
1684 	page = vm_normal_page(vma, addr, ptent);
1685 	if (!page) {
1686 		/* We don't need up-to-date accessed/dirty bits. */
1687 		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
1688 		arch_check_zapped_pte(vma, ptent);
1689 		tlb_remove_tlb_entry(tlb, pte, addr);
1690 		if (userfaultfd_pte_wp(vma, ptent))
1691 			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
1692 						pte, 1, details, ptent);
1693 		ksm_might_unmap_zero_page(mm, ptent);
1694 		return 1;
1695 	}
1696 
1697 	folio = page_folio(page);
1698 	if (unlikely(!should_zap_folio(details, folio))) {
1699 		*any_skipped = true;
1700 		return 1;
1701 	}
1702 
1703 	/*
1704 	 * Make sure that the common "small folio" case is as fast as possible
1705 	 * by keeping the batching logic separate.
1706 	 */
1707 	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
1708 		nr = folio_pte_batch(folio, pte, ptent, max_nr);
1709 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
1710 				       addr, details, rss, force_flush,
1711 				       force_break, any_skipped);
1712 		return nr;
1713 	}
1714 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
1715 			       details, rss, force_flush, force_break, any_skipped);
1716 	return 1;
1717 }
1718 
1719 static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
1720 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1721 		unsigned int max_nr, unsigned long addr,
1722 		struct zap_details *details, int *rss, bool *any_skipped)
1723 {
1724 	softleaf_t entry;
1725 	int nr = 1;
1726 
1727 	*any_skipped = true;
1728 	entry = softleaf_from_pte(ptent);
1729 	if (softleaf_is_device_private(entry) ||
1730 	    softleaf_is_device_exclusive(entry)) {
1731 		struct page *page = softleaf_to_page(entry);
1732 		struct folio *folio = page_folio(page);
1733 
1734 		if (unlikely(!should_zap_folio(details, folio)))
1735 			return 1;
1736 		/*
		 * Both device private and device exclusive mappings should
		 * only work with anonymous pages so far, so we don't need to
		 * consider the uffd-wp bit when zapping. For more information,
1740 		 * see zap_install_uffd_wp_if_needed().
1741 		 */
1742 		WARN_ON_ONCE(!vma_is_anonymous(vma));
1743 		rss[mm_counter(folio)]--;
1744 		folio_remove_rmap_pte(folio, page, vma);
1745 		folio_put(folio);
1746 	} else if (softleaf_is_swap(entry)) {
		/* Genuine swap entries, hence private anon pages */
1748 		if (!should_zap_cows(details))
1749 			return 1;
1750 
1751 		nr = swap_pte_batch(pte, max_nr, ptent);
1752 		rss[MM_SWAPENTS] -= nr;
1753 		swap_put_entries_direct(entry, nr);
1754 	} else if (softleaf_is_migration(entry)) {
1755 		struct folio *folio = softleaf_to_folio(entry);
1756 
1757 		if (!should_zap_folio(details, folio))
1758 			return 1;
1759 		rss[mm_counter(folio)]--;
1760 	} else if (softleaf_is_uffd_wp_marker(entry)) {
1761 		/*
1762 		 * For anon: always drop the marker; for file: only
1763 		 * drop the marker if explicitly requested.
1764 		 */
1765 		if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
1766 			return 1;
1767 	} else if (softleaf_is_guard_marker(entry)) {
1768 		/*
1769 		 * Ordinary zapping should not remove guard PTE
1770 		 * markers. Only do so if we should remove PTE markers
1771 		 * in general.
1772 		 */
1773 		if (!zap_drop_markers(details))
1774 			return 1;
1775 	} else if (softleaf_is_hwpoison(entry) ||
1776 		   softleaf_is_poison_marker(entry)) {
1777 		if (!should_zap_cows(details))
1778 			return 1;
1779 	} else {
1780 		/* We should have covered all the swap entry types */
1781 		pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
1782 		WARN_ON_ONCE(1);
1783 	}
1784 	clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
1785 	*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
1786 
1787 	return nr;
1788 }
1789 
1790 static inline int do_zap_pte_range(struct mmu_gather *tlb,
1791 				   struct vm_area_struct *vma, pte_t *pte,
1792 				   unsigned long addr, unsigned long end,
1793 				   struct zap_details *details, int *rss,
1794 				   bool *force_flush, bool *force_break,
1795 				   bool *any_skipped)
1796 {
1797 	pte_t ptent = ptep_get(pte);
1798 	int max_nr = (end - addr) / PAGE_SIZE;
1799 	int nr = 0;
1800 
1801 	/* Skip all consecutive none ptes */
1802 	if (pte_none(ptent)) {
1803 		for (nr = 1; nr < max_nr; nr++) {
1804 			ptent = ptep_get(pte + nr);
1805 			if (!pte_none(ptent))
1806 				break;
1807 		}
1808 		max_nr -= nr;
1809 		if (!max_nr)
1810 			return nr;
1811 		pte += nr;
1812 		addr += nr * PAGE_SIZE;
1813 	}
1814 
1815 	if (pte_present(ptent))
1816 		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
1817 				       details, rss, force_flush, force_break,
1818 				       any_skipped);
1819 	else
1820 		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
1821 					  details, rss, any_skipped);
1822 
1823 	return nr;
1824 }
1825 
1826 static bool pte_table_reclaim_possible(unsigned long start, unsigned long end,
1827 		struct zap_details *details)
1828 {
1829 	if (!IS_ENABLED(CONFIG_PT_RECLAIM))
1830 		return false;
1831 	/* Only zap if we are allowed to and cover the full page table. */
1832 	return details && details->reclaim_pt && (end - start >= PMD_SIZE);
1833 }
1834 
1835 static bool zap_empty_pte_table(struct mm_struct *mm, pmd_t *pmd,
1836 		spinlock_t *ptl, pmd_t *pmdval)
1837 {
1838 	spinlock_t *pml = pmd_lockptr(mm, pmd);
1839 
1840 	if (ptl != pml && !spin_trylock(pml))
1841 		return false;
1842 
1843 	*pmdval = pmdp_get(pmd);
1844 	pmd_clear(pmd);
1845 	if (ptl != pml)
1846 		spin_unlock(pml);
1847 	return true;
1848 }
1849 
1850 static bool zap_pte_table_if_empty(struct mm_struct *mm, pmd_t *pmd,
1851 		unsigned long addr, pmd_t *pmdval)
1852 {
1853 	spinlock_t *pml, *ptl = NULL;
1854 	pte_t *start_pte, *pte;
1855 	int i;
1856 
1857 	pml = pmd_lock(mm, pmd);
1858 	start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, pmdval, &ptl);
1859 	if (!start_pte)
1860 		goto out_ptl;
1861 	if (ptl != pml)
1862 		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1863 
1864 	for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) {
1865 		if (!pte_none(ptep_get(pte)))
1866 			goto out_ptl;
1867 	}
1868 	pte_unmap(start_pte);
1869 
1870 	pmd_clear(pmd);
1871 
1872 	if (ptl != pml)
1873 		spin_unlock(ptl);
1874 	spin_unlock(pml);
1875 	return true;
1876 out_ptl:
1877 	if (start_pte)
1878 		pte_unmap_unlock(start_pte, ptl);
1879 	if (ptl != pml)
1880 		spin_unlock(pml);
1881 	return false;
1882 }
1883 
1884 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1885 				struct vm_area_struct *vma, pmd_t *pmd,
1886 				unsigned long addr, unsigned long end,
1887 				struct zap_details *details)
1888 {
1889 	bool can_reclaim_pt = pte_table_reclaim_possible(addr, end, details);
1890 	bool force_flush = false, force_break = false;
1891 	struct mm_struct *mm = tlb->mm;
1892 	int rss[NR_MM_COUNTERS];
1893 	spinlock_t *ptl;
1894 	pte_t *start_pte;
1895 	pte_t *pte;
1896 	pmd_t pmdval;
1897 	unsigned long start = addr;
1898 	bool direct_reclaim = true;
1899 	int nr;
1900 
1901 retry:
1902 	tlb_change_page_size(tlb, PAGE_SIZE);
1903 	init_rss_vec(rss);
1904 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1905 	if (!pte)
1906 		return addr;
1907 
1908 	flush_tlb_batched_pending(mm);
1909 	lazy_mmu_mode_enable();
1910 	do {
1911 		bool any_skipped = false;
1912 
1913 		if (need_resched()) {
1914 			direct_reclaim = false;
1915 			break;
1916 		}
1917 
1918 		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
1919 				      &force_flush, &force_break, &any_skipped);
1920 		if (any_skipped)
1921 			can_reclaim_pt = false;
1922 		if (unlikely(force_break)) {
1923 			addr += nr * PAGE_SIZE;
1924 			direct_reclaim = false;
1925 			break;
1926 		}
1927 	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
1928 
1929 	/*
1930 	 * Fast path: try to hold the pmd lock and unmap the PTE page.
1931 	 *
1932 	 * If the pte lock was released midway (retry case), or if the attempt
1933 	 * to hold the pmd lock failed, then we need to recheck all pte entries
1934 	 * to ensure they are still none, thereby preventing the pte entries
1935 	 * from being repopulated by another thread.
1936 	 */
1937 	if (can_reclaim_pt && direct_reclaim && addr == end)
1938 		direct_reclaim = zap_empty_pte_table(mm, pmd, ptl, &pmdval);
1939 
1940 	add_mm_rss_vec(mm, rss);
1941 	lazy_mmu_mode_disable();
1942 
1943 	/* Do the actual TLB flush before dropping ptl */
1944 	if (force_flush) {
1945 		tlb_flush_mmu_tlbonly(tlb);
1946 		tlb_flush_rmaps(tlb, vma);
1947 	}
1948 	pte_unmap_unlock(start_pte, ptl);
1949 
1950 	/*
1951 	 * If we forced a TLB flush (either due to running out of
1952 	 * batch buffers or because we needed to flush dirty TLB
1953 	 * entries before releasing the ptl), free the batched
1954 	 * memory too. Come back again if we didn't do everything.
1955 	 */
1956 	if (force_flush)
1957 		tlb_flush_mmu(tlb);
1958 
1959 	if (addr != end) {
1960 		cond_resched();
1961 		force_flush = false;
1962 		force_break = false;
1963 		goto retry;
1964 	}
1965 
1966 	if (can_reclaim_pt) {
1967 		if (direct_reclaim || zap_pte_table_if_empty(mm, pmd, start, &pmdval)) {
1968 			pte_free_tlb(tlb, pmd_pgtable(pmdval), addr);
1969 			mm_dec_nr_ptes(mm);
1970 		}
1971 	}
1972 
1973 	return addr;
1974 }
1975 
1976 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1977 				struct vm_area_struct *vma, pud_t *pud,
1978 				unsigned long addr, unsigned long end,
1979 				struct zap_details *details)
1980 {
1981 	pmd_t *pmd;
1982 	unsigned long next;
1983 
1984 	pmd = pmd_offset(pud, addr);
1985 	do {
1986 		next = pmd_addr_end(addr, end);
1987 		if (pmd_is_huge(*pmd)) {
1988 			if (next - addr != HPAGE_PMD_SIZE)
1989 				__split_huge_pmd(vma, pmd, addr, false);
1990 			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1991 				addr = next;
1992 				continue;
1993 			}
1994 			/* fall through */
1995 		} else if (details && details->single_folio &&
1996 			   folio_test_pmd_mappable(details->single_folio) &&
1997 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1998 			sync_with_folio_pmd_zap(tlb->mm, pmd);
1999 		}
2000 		if (pmd_none(*pmd)) {
2001 			addr = next;
2002 			continue;
2003 		}
2004 		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
2005 		if (addr != next)
2006 			pmd--;
2007 	} while (pmd++, cond_resched(), addr != end);
2008 
2009 	return addr;
2010 }
2011 
2012 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
2013 				struct vm_area_struct *vma, p4d_t *p4d,
2014 				unsigned long addr, unsigned long end,
2015 				struct zap_details *details)
2016 {
2017 	pud_t *pud;
2018 	unsigned long next;
2019 
2020 	pud = pud_offset(p4d, addr);
2021 	do {
2022 		next = pud_addr_end(addr, end);
2023 		if (pud_trans_huge(*pud)) {
2024 			if (next - addr != HPAGE_PUD_SIZE)
2025 				split_huge_pud(vma, pud, addr);
2026 			else if (zap_huge_pud(tlb, vma, pud, addr))
2027 				goto next;
2028 			/* fall through */
2029 		}
2030 		if (pud_none_or_clear_bad(pud))
2031 			continue;
2032 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
2033 next:
2034 		cond_resched();
2035 	} while (pud++, addr = next, addr != end);
2036 
2037 	return addr;
2038 }
2039 
2040 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
2041 				struct vm_area_struct *vma, pgd_t *pgd,
2042 				unsigned long addr, unsigned long end,
2043 				struct zap_details *details)
2044 {
2045 	p4d_t *p4d;
2046 	unsigned long next;
2047 
2048 	p4d = p4d_offset(pgd, addr);
2049 	do {
2050 		next = p4d_addr_end(addr, end);
2051 		if (p4d_none_or_clear_bad(p4d))
2052 			continue;
2053 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
2054 	} while (p4d++, addr = next, addr != end);
2055 
2056 	return addr;
2057 }
2058 
2059 static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2060 		unsigned long start, unsigned long end,
2061 		struct zap_details *details)
2062 {
2063 	const bool reaping = details && details->reaping;
2064 
2065 	VM_WARN_ON_ONCE(start >= end || !range_in_vma(vma, start, end));
2066 
2067 	/* uprobe_munmap() might sleep, so skip it when reaping. */
2068 	if (vma->vm_file && !reaping)
2069 		uprobe_munmap(vma, start, end);
2070 
2071 	if (unlikely(is_vm_hugetlb_page(vma))) {
2072 		zap_flags_t zap_flags = details ? details->zap_flags : 0;
2073 
2074 		VM_WARN_ON_ONCE(reaping);
2075 		/*
2076 		 * vm_file will be NULL when we fail early while instantiating
2077 		 * a new mapping. In this case, no pages were mapped yet and
2078 		 * there is nothing to do.
2079 		 */
2080 		if (!vma->vm_file)
2081 			return;
2082 		__unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags);
2083 	} else {
2084 		unsigned long next, addr = start;
2085 		pgd_t *pgd;
2086 
2087 		tlb_start_vma(tlb, vma);
2088 		pgd = pgd_offset(vma->vm_mm, addr);
2089 		do {
2090 			next = pgd_addr_end(addr, end);
2091 			if (pgd_none_or_clear_bad(pgd))
2092 				continue;
2093 			next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
2094 		} while (pgd++, addr = next, addr != end);
2095 		tlb_end_vma(tlb, vma);
2096 	}
2097 }
2098 
2099 /**
2100  * zap_vma_for_reaping - zap all page table entries in the vma without blocking
2101  * @vma: The vma to zap.
2102  *
 * Zap all page table entries in the vma without blocking, for use by the
 * OOM killer. Hugetlb vmas are not supported.
2105  *
 * Return: %0 on success, %-EBUSY if we would have to block.
2107  */
2108 int zap_vma_for_reaping(struct vm_area_struct *vma)
2109 {
2110 	struct zap_details details = {
2111 		.reaping = true,
2112 	};
2113 	struct mmu_notifier_range range;
2114 	struct mmu_gather tlb;
2115 
2116 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2117 				vma->vm_start, vma->vm_end);
2118 	tlb_gather_mmu(&tlb, vma->vm_mm);
2119 	if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
2120 		tlb_finish_mmu(&tlb);
2121 		return -EBUSY;
2122 	}
2123 	__zap_vma_range(&tlb, vma, range.start, range.end, &details);
2124 	mmu_notifier_invalidate_range_end(&range);
2125 	tlb_finish_mmu(&tlb);
2126 	return 0;
2127 }
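
/*
 * Illustrative sketch (not from any in-tree caller): a reaper-style user
 * walks the VMAs of a dying mm and bails out as soon as blocking would be
 * required. 'vmi' is a hypothetical VMA iterator set up by the caller, and
 * hugetlb VMAs are skipped since they are not supported here:
 *
 *	for_each_vma(vmi, vma) {
 *		if (is_vm_hugetlb_page(vma))
 *			continue;
 *		if (zap_vma_for_reaping(vma) == -EBUSY)
 *			break;
 *	}
 */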
2128 
2129 /**
 * unmap_vmas - unmap a range of memory covered by a list of VMAs
 * @tlb: address of the caller's struct mmu_gather
 * @unmap: the unmap_desc describing the VMAs and the range to unmap
 *
 * Unmap all pages in the VMA list.
 *
 * Only addresses between @unmap->vma_start and @unmap->vma_end will be
 * unmapped.
2137  *
2138  * The VMA list must be sorted in ascending virtual address order.
2139  *
2140  * unmap_vmas() assumes that the caller will flush the whole unmapped address
2141  * range after unmap_vmas() returns.  So the only responsibility here is to
2142  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
2143  * drops the lock and schedules.
2144  */
2145 void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
2146 {
2147 	struct vm_area_struct *vma;
2148 	struct mmu_notifier_range range;
2149 	struct zap_details details = {
2150 		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
2151 	};
2152 
2153 	vma = unmap->first;
2154 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
2155 				unmap->vma_start, unmap->vma_end);
2156 	mmu_notifier_invalidate_range_start(&range);
2157 	do {
2158 		unsigned long start = max(vma->vm_start, unmap->vma_start);
2159 		unsigned long end = min(vma->vm_end, unmap->vma_end);
2160 
2161 		hugetlb_zap_begin(vma, &start, &end);
2162 		__zap_vma_range(tlb, vma, start, end, &details);
2163 		hugetlb_zap_end(vma, &details);
2164 		vma = mas_find(unmap->mas, unmap->tree_end - 1);
2165 	} while (vma);
2166 	mmu_notifier_invalidate_range_end(&range);
2167 }
2168 
2169 /**
2170  * zap_vma_range_batched - zap page table entries in a vma range
2171  * @tlb: pointer to the caller's struct mmu_gather
2172  * @vma: the vma covering the range to zap
2173  * @address: starting address of the range to zap
2174  * @size: number of bytes to zap
2175  * @details: details specifying zapping behavior
2176  *
2177  * @tlb must not be NULL. The provided address range must be fully
2178  * contained within @vma. If @vma is for hugetlb, @tlb is flushed and
2179  * re-initialized by this function.
2180  *
2181  * If @details is NULL, this function will zap all page table entries.
2182  */
2183 void zap_vma_range_batched(struct mmu_gather *tlb,
2184 		struct vm_area_struct *vma, unsigned long address,
2185 		unsigned long size, struct zap_details *details)
2186 {
2187 	const unsigned long end = address + size;
2188 	struct mmu_notifier_range range;
2189 
2190 	VM_WARN_ON_ONCE(!tlb || tlb->mm != vma->vm_mm);
2191 
2192 	if (unlikely(!size))
2193 		return;
2194 
2195 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2196 				address, end);
2197 	hugetlb_zap_begin(vma, &range.start, &range.end);
2198 	update_hiwater_rss(vma->vm_mm);
2199 	mmu_notifier_invalidate_range_start(&range);
2200 	/*
2201 	 * unmap 'address-end' not 'range.start-range.end' as range
2202 	 * could have been expanded for hugetlb pmd sharing.
2203 	 */
2204 	__zap_vma_range(tlb, vma, address, end, details);
2205 	mmu_notifier_invalidate_range_end(&range);
2206 	if (is_vm_hugetlb_page(vma)) {
2207 		/*
		 * Flush the TLB and free resources before hugetlb_zap_end(),
		 * to avoid allocation failures in concurrent page faults.
2210 		 */
2211 		tlb_finish_mmu(tlb);
2212 		hugetlb_zap_end(vma, details);
2213 		tlb_gather_mmu(tlb, vma->vm_mm);
2214 	}
2215 }
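
/*
 * Illustrative sketch: batching several zaps under a single mmu_gather,
 * mirroring what zap_vma_range() does for one range. 'vma' and the
 * hypothetical addresses 'a' and 'b' are assumed to lie within the VMA:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, vma->vm_mm);
 *	zap_vma_range_batched(&tlb, vma, a, PAGE_SIZE, NULL);
 *	zap_vma_range_batched(&tlb, vma, b, PAGE_SIZE, NULL);
 *	tlb_finish_mmu(&tlb);
 */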
2216 
2217 /**
2218  * zap_vma_range - zap all page table entries in a vma range
2219  * @vma: the vma covering the range to zap
2220  * @address: starting address of the range to zap
2221  * @size: number of bytes to zap
2222  *
2223  * The provided address range must be fully contained within @vma.
2224  */
2225 void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
2226 		unsigned long size)
2227 {
2228 	struct mmu_gather tlb;
2229 
2230 	tlb_gather_mmu(&tlb, vma->vm_mm);
2231 	zap_vma_range_batched(&tlb, vma, address, size, NULL);
2232 	tlb_finish_mmu(&tlb);
2233 }
2234 
2235 /**
2236  * zap_special_vma_range - zap all page table entries in a special vma range
2237  * @vma: the vma covering the range to zap
2238  * @address: starting address of the range to zap
2239  * @size: number of bytes to zap
2240  *
 * This function does nothing when the provided address range is not fully
 * contained in @vma, or when @vma is neither VM_PFNMAP nor VM_MIXEDMAP.
2243  */
2244 void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
2245 		unsigned long size)
2246 {
2247 	if (!range_in_vma(vma, address, address + size) ||
2248 	   !(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
2249 		return;
2250 
2251 	zap_vma_range(vma, address, size);
2252 }
2253 EXPORT_SYMBOL_GPL(zap_special_vma_range);
2254 
2255 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
2256 {
2257 	pgd_t *pgd;
2258 	p4d_t *p4d;
2259 	pud_t *pud;
2260 	pmd_t *pmd;
2261 
2262 	pgd = pgd_offset(mm, addr);
2263 	p4d = p4d_alloc(mm, pgd, addr);
2264 	if (!p4d)
2265 		return NULL;
2266 	pud = pud_alloc(mm, p4d, addr);
2267 	if (!pud)
2268 		return NULL;
2269 	pmd = pmd_alloc(mm, pud, addr);
2270 	if (!pmd)
2271 		return NULL;
2272 
2273 	VM_BUG_ON(pmd_trans_huge(*pmd));
2274 	return pmd;
2275 }
2276 
2277 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2278 		      spinlock_t **ptl)
2279 {
2280 	pmd_t *pmd = walk_to_pmd(mm, addr);
2281 
2282 	if (!pmd)
2283 		return NULL;
2284 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
2285 }
2286 
2287 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
2288 {
2289 	VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
2290 	/*
	 * Anyone who wants to forbid the zeropage after some zeropages
	 * might already have been mapped has to scan the page tables and
	 * bail out on any zeropages found. Zeropages in COW mappings can
2294 	 * be unshared using FAULT_FLAG_UNSHARE faults.
2295 	 */
2296 	if (mm_forbids_zeropage(vma->vm_mm))
2297 		return false;
2298 	/* zeropages in COW mappings are common and unproblematic. */
2299 	if (is_cow_mapping(vma->vm_flags))
2300 		return true;
2301 	/* Mappings that do not allow for writable PTEs are unproblematic. */
2302 	if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
2303 		return true;
2304 	/*
2305 	 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
2306 	 * find the shared zeropage and longterm-pin it, which would
2307 	 * be problematic as soon as the zeropage gets replaced by a different
2308 	 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
	 * now differ from what GUP looked up. FSDAX is incompatible with
	 * FOLL_LONGTERM and VM_IO is incompatible with GUP completely (see
2311 	 * check_vma_flags).
2312 	 */
2313 	return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
2314 	       (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
2315 }
2316 
2317 static int validate_page_before_insert(struct vm_area_struct *vma,
2318 				       struct page *page)
2319 {
2320 	struct folio *folio = page_folio(page);
2321 
2322 	if (!folio_ref_count(folio))
2323 		return -EINVAL;
2324 	if (unlikely(is_zero_folio(folio))) {
2325 		if (!vm_mixed_zeropage_allowed(vma))
2326 			return -EINVAL;
2327 		return 0;
2328 	}
2329 	if (folio_test_anon(folio) || page_has_type(page))
2330 		return -EINVAL;
2331 	flush_dcache_folio(folio);
2332 	return 0;
2333 }
2334 
2335 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
2336 				unsigned long addr, struct page *page,
2337 				pgprot_t prot, bool mkwrite)
2338 {
2339 	struct folio *folio = page_folio(page);
2340 	pte_t pteval = ptep_get(pte);
2341 
2342 	if (!pte_none(pteval)) {
2343 		if (!mkwrite)
2344 			return -EBUSY;
2345 
2346 		/* see insert_pfn(). */
2347 		if (pte_pfn(pteval) != page_to_pfn(page)) {
2348 			WARN_ON_ONCE(!is_zero_pfn(pte_pfn(pteval)));
2349 			return -EFAULT;
2350 		}
2351 		pteval = maybe_mkwrite(pteval, vma);
2352 		pteval = pte_mkyoung(pteval);
2353 		if (ptep_set_access_flags(vma, addr, pte, pteval, 1))
2354 			update_mmu_cache(vma, addr, pte);
2355 		return 0;
2356 	}
2357 
2358 	/* Ok, finally just insert the thing.. */
2359 	pteval = mk_pte(page, prot);
2360 	if (unlikely(is_zero_folio(folio))) {
2361 		pteval = pte_mkspecial(pteval);
2362 	} else {
2363 		folio_get(folio);
2365 		if (mkwrite) {
2366 			pteval = pte_mkyoung(pteval);
2367 			pteval = maybe_mkwrite(pte_mkdirty(pteval), vma);
2368 		}
2369 		inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
2370 		folio_add_file_rmap_pte(folio, page, vma);
2371 	}
2372 	set_pte_at(vma->vm_mm, addr, pte, pteval);
2373 	return 0;
2374 }
2375 
2376 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2377 			struct page *page, pgprot_t prot, bool mkwrite)
2378 {
2379 	int retval;
2380 	pte_t *pte;
2381 	spinlock_t *ptl;
2382 
2383 	retval = validate_page_before_insert(vma, page);
2384 	if (retval)
2385 		goto out;
2386 	retval = -ENOMEM;
2387 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
2388 	if (!pte)
2389 		goto out;
2390 	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot,
2391 					mkwrite);
2392 	pte_unmap_unlock(pte, ptl);
2393 out:
2394 	return retval;
2395 }
2396 
2397 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
2398 			unsigned long addr, struct page *page, pgprot_t prot)
2399 {
2400 	int err;
2401 
2402 	err = validate_page_before_insert(vma, page);
2403 	if (err)
2404 		return err;
2405 	return insert_page_into_pte_locked(vma, pte, addr, page, prot, false);
2406 }
2407 
/*
 * insert_pages() amortizes the cost of spinlock operations
 * when inserting pages in a loop.
 */
2411 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
2412 			struct page **pages, unsigned long *num, pgprot_t prot)
2413 {
2414 	pmd_t *pmd = NULL;
2415 	pte_t *start_pte, *pte;
2416 	spinlock_t *pte_lock;
2417 	struct mm_struct *const mm = vma->vm_mm;
2418 	unsigned long curr_page_idx = 0;
2419 	unsigned long remaining_pages_total = *num;
2420 	unsigned long pages_to_write_in_pmd;
2421 	int ret;
2422 more:
2423 	ret = -EFAULT;
2424 	pmd = walk_to_pmd(mm, addr);
2425 	if (!pmd)
2426 		goto out;
2427 
2428 	pages_to_write_in_pmd = min_t(unsigned long,
2429 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
2430 
2431 	/* Allocate the PTE if necessary; takes PMD lock once only. */
2432 	ret = -ENOMEM;
2433 	if (pte_alloc(mm, pmd))
2434 		goto out;
2435 
2436 	while (pages_to_write_in_pmd) {
2437 		int pte_idx = 0;
2438 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
2439 
2440 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2441 		if (!start_pte) {
2442 			ret = -EFAULT;
2443 			goto out;
2444 		}
2445 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
2446 			int err = insert_page_in_batch_locked(vma, pte,
2447 				addr, pages[curr_page_idx], prot);
2448 			if (unlikely(err)) {
2449 				pte_unmap_unlock(start_pte, pte_lock);
2450 				ret = err;
2451 				remaining_pages_total -= pte_idx;
2452 				goto out;
2453 			}
2454 			addr += PAGE_SIZE;
2455 			++curr_page_idx;
2456 		}
2457 		pte_unmap_unlock(start_pte, pte_lock);
2458 		pages_to_write_in_pmd -= batch_size;
2459 		remaining_pages_total -= batch_size;
2460 	}
2461 	if (remaining_pages_total)
2462 		goto more;
2463 	ret = 0;
2464 out:
2465 	*num = remaining_pages_total;
2466 	return ret;
2467 }
2468 
2469 /**
2470  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2471  * @vma: user vma to map to
2472  * @addr: target start user address of these pages
2473  * @pages: source kernel pages
2474  * @num: in: number of pages to map. out: number of pages that were *not*
2475  * mapped. (0 means all pages were successfully mapped).
2476  *
2477  * Preferred over vm_insert_page() when inserting multiple pages.
2478  *
2479  * In case of error, we may have mapped a subset of the provided
2480  * pages. It is the caller's responsibility to account for this case.
2481  *
2482  * The same restrictions apply as in vm_insert_page().
2483  */
2484 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2485 			struct page **pages, unsigned long *num)
2486 {
2487 	const unsigned long nr_pages = *num;
2488 	const unsigned long end = addr + PAGE_SIZE * nr_pages;
2489 
2490 	if (!range_in_vma(vma, addr, end))
2491 		return -EFAULT;
2492 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2493 		VM_WARN_ON_ONCE(mmap_read_trylock(vma->vm_mm));
2494 		VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
2495 		vm_flags_set(vma, VM_MIXEDMAP);
2496 	}
2497 	/* Defer page refcount checking till we're about to map that page. */
2498 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
2499 }
2500 EXPORT_SYMBOL(vm_insert_pages);
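
/*
 * Illustrative sketch of the batched insert from an mmap handler; the
 * 'my_pages' array and 'my_nr' count are hypothetical driver state:
 *
 *	unsigned long nr = my_nr;
 *	int err = vm_insert_pages(vma, vma->vm_start, my_pages, &nr);
 *
 * On return, 'nr' holds the number of pages that were *not* mapped, so a
 * failing call may still have inserted 'my_nr - nr' pages that the caller
 * (or VMA teardown) must account for.
 */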
2501 
2502 int map_kernel_pages_prepare(struct vm_area_desc *desc)
2503 {
2504 	const struct mmap_action *action = &desc->action;
2505 	const unsigned long addr = action->map_kernel.start;
2506 	unsigned long nr_pages, end;
2507 
2508 	if (!vma_desc_test(desc, VMA_MIXEDMAP_BIT)) {
2509 		VM_WARN_ON_ONCE(mmap_read_trylock(desc->mm));
2510 		VM_WARN_ON_ONCE(vma_desc_test(desc, VMA_PFNMAP_BIT));
2511 		vma_desc_set_flags(desc, VMA_MIXEDMAP_BIT);
2512 	}
2513 
2514 	nr_pages = action->map_kernel.nr_pages;
2515 	end = addr + PAGE_SIZE * nr_pages;
2516 	if (!range_in_vma_desc(desc, addr, end))
2517 		return -EFAULT;
2518 
2519 	return 0;
2520 }
2521 EXPORT_SYMBOL(map_kernel_pages_prepare);
2522 
2523 int map_kernel_pages_complete(struct vm_area_struct *vma,
2524 			      struct mmap_action *action)
2525 {
2526 	unsigned long nr_pages;
2527 
2528 	nr_pages = action->map_kernel.nr_pages;
2529 	return insert_pages(vma, action->map_kernel.start,
2530 			    action->map_kernel.pages,
2531 			    &nr_pages, vma->vm_page_prot);
2532 }
2533 EXPORT_SYMBOL(map_kernel_pages_complete);
2534 
2535 /**
2536  * vm_insert_page - insert single page into user vma
2537  * @vma: user vma to map to
2538  * @addr: target user address of this page
2539  * @page: source kernel page
2540  *
2541  * This allows drivers to insert individual pages they've allocated
2542  * into a user vma. The zeropage is supported in some VMAs,
2543  * see vm_mixed_zeropage_allowed().
2544  *
2545  * The page has to be a nice clean _individual_ kernel allocation.
2546  * If you allocate a compound page, you need to have marked it as
2547  * such (__GFP_COMP), or manually just split the page up yourself
2548  * (see split_page()).
2549  *
2550  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2551  * took an arbitrary page protection parameter. This doesn't allow
2552  * that. Your vma protection will have to be set up correctly, which
2553  * means that if you want a shared writable mapping, you'd better
2554  * ask for a shared writable mapping!
2555  *
2556  * The page does not need to be reserved.
2557  *
2558  * Usually this function is called from f_op->mmap() handler
2559  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2560  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2561  * function from other places, for example from page-fault handler.
2562  *
2563  * Return: %0 on success, negative error code otherwise.
2564  */
2565 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2566 			struct page *page)
2567 {
2568 	if (addr < vma->vm_start || addr >= vma->vm_end)
2569 		return -EFAULT;
2570 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2571 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2572 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2573 		vm_flags_set(vma, VM_MIXEDMAP);
2574 	}
2575 	return insert_page(vma, addr, page, vma->vm_page_prot, false);
2576 }
2577 EXPORT_SYMBOL(vm_insert_page);
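
/*
 * Illustrative sketch from a hypothetical mmap handler; 'my_page' is a
 * driver-owned, non-compound kernel page:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return vm_insert_page(vma, vma->vm_start, my_page);
 *	}
 *
 * Running at mmap() time under the mmap write-lock lets vm_insert_page()
 * set VM_MIXEDMAP on the vma itself.
 */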
2578 
2579 /*
2580  * __vm_map_pages - maps range of kernel pages into user vma
2581  * @vma: user vma to map to
2582  * @pages: pointer to array of source kernel pages
2583  * @num: number of pages in page array
2584  * @offset: user's requested vm_pgoff
2585  *
2586  * This allows drivers to map range of kernel pages into a user vma.
2587  * The zeropage is supported in some VMAs, see
2588  * vm_mixed_zeropage_allowed().
2589  *
2590  * Return: 0 on success and error code otherwise.
2591  */
2592 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2593 				unsigned long num, unsigned long offset)
2594 {
2595 	unsigned long count = vma_pages(vma);
2596 	unsigned long uaddr = vma->vm_start;
2597 
2598 	/* Fail if the user requested offset is beyond the end of the object */
2599 	if (offset >= num)
2600 		return -ENXIO;
2601 
2602 	/* Fail if the user requested size exceeds available object size */
2603 	if (count > num - offset)
2604 		return -ENXIO;
2605 
2606 	return vm_insert_pages(vma, uaddr, pages + offset, &count);
2607 }
2608 
2609 /**
 * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2611  * @vma: user vma to map to
2612  * @pages: pointer to array of source kernel pages
2613  * @num: number of pages in page array
2614  *
 * Maps an object consisting of @num pages, catering for the user's
 * requested vm_pgoff.
2617  *
2618  * If we fail to insert any page into the vma, the function will return
2619  * immediately leaving any previously inserted pages present.  Callers
2620  * from the mmap handler may immediately return the error as their caller
2621  * will destroy the vma, removing any successfully inserted pages. Other
2622  * callers should make their own arrangements for calling unmap_region().
2623  *
2624  * Context: Process context. Called by mmap handlers.
2625  * Return: 0 on success and error code otherwise.
2626  */
2627 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2628 				unsigned long num)
2629 {
2630 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2631 }
2632 EXPORT_SYMBOL(vm_map_pages);
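
/*
 * Illustrative sketch: a driver exposing a page array lets vm_map_pages()
 * honour the offset userspace passed to mmap(). 'my_pages' and
 * 'my_nr_pages' are hypothetical driver state:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return vm_map_pages(vma, my_pages, my_nr_pages);
 *	}
 */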
2633 
2634 /**
 * vm_map_pages_zero - map a range of kernel pages starting at offset 0
2636  * @vma: user vma to map to
2637  * @pages: pointer to array of source kernel pages
2638  * @num: number of pages in page array
2639  *
 * Similar to vm_map_pages(), except that it explicitly sets the offset
 * to 0. This function is intended for drivers that do not consider
 * vm_pgoff.
2643  *
2644  * Context: Process context. Called by mmap handlers.
2645  * Return: 0 on success and error code otherwise.
2646  */
2647 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2648 				unsigned long num)
2649 {
2650 	return __vm_map_pages(vma, pages, num, 0);
2651 }
2652 EXPORT_SYMBOL(vm_map_pages_zero);
2653 
2654 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2655 			unsigned long pfn, pgprot_t prot, bool mkwrite)
2656 {
2657 	struct mm_struct *mm = vma->vm_mm;
2658 	pte_t *pte, entry;
2659 	spinlock_t *ptl;
2660 
2661 	pte = get_locked_pte(mm, addr, &ptl);
2662 	if (!pte)
2663 		return VM_FAULT_OOM;
2664 	entry = ptep_get(pte);
2665 	if (!pte_none(entry)) {
2666 		if (mkwrite) {
2667 			/*
2668 			 * For read faults on private mappings the PFN passed
2669 			 * in may not match the PFN we have mapped if the
2670 			 * mapped PFN is a writeable COW page.  In the mkwrite
2671 			 * case we are creating a writable PTE for a shared
2672 			 * mapping and we expect the PFNs to match. If they
2673 			 * don't match, we are likely racing with block
2674 			 * allocation and mapping invalidation so just skip the
2675 			 * update.
2676 			 */
2677 			if (pte_pfn(entry) != pfn) {
2678 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2679 				goto out_unlock;
2680 			}
2681 			entry = pte_mkyoung(entry);
2682 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2683 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2684 				update_mmu_cache(vma, addr, pte);
2685 		}
2686 		goto out_unlock;
2687 	}
2688 
2689 	/* Ok, finally just insert the thing.. */
2690 	entry = pte_mkspecial(pfn_pte(pfn, prot));
2691 
2692 	if (mkwrite) {
2693 		entry = pte_mkyoung(entry);
2694 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2695 	}
2696 
2697 	set_pte_at(mm, addr, pte, entry);
2698 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2699 
2700 out_unlock:
2701 	pte_unmap_unlock(pte, ptl);
2702 	return VM_FAULT_NOPAGE;
2703 }
2704 
2705 /**
2706  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2707  * @vma: user vma to map to
2708  * @addr: target user address of this page
2709  * @pfn: source kernel pfn
2710  * @pgprot: pgprot flags for the inserted page
2711  *
2712  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2713  * to override pgprot on a per-page basis.
2714  *
2715  * This only makes sense for IO mappings, and it makes no sense for
 * COW mappings.  In general, using multiple VMAs is preferable;
 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2718  * impractical.
2719  *
 * @pgprot typically only differs from @vma->vm_page_prot when drivers set
 * caching- and encryption bits different from those of @vma->vm_page_prot,
2722  * because the caching- or encryption mode may not be known at mmap() time.
2723  *
2724  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2725  * to set caching and encryption bits for those vmas (except for COW pages).
2726  * This is ensured by core vm only modifying these page table entries using
2727  * functions that don't touch caching- or encryption bits, using pte_modify()
2728  * if needed. (See for example mprotect()).
2729  *
2730  * Also when new page-table entries are created, this is only done using the
2731  * fault() callback, and never using the value of vma->vm_page_prot,
2732  * except for page-table entries that point to anonymous pages as the result
2733  * of COW.
2734  *
2735  * Context: Process context.  May allocate using %GFP_KERNEL.
2736  * Return: vm_fault_t value.
2737  */
2738 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2739 			unsigned long pfn, pgprot_t pgprot)
2740 {
2741 	/*
2742 	 * Technically, architectures with pte_special can avoid all these
2743 	 * restrictions (same for remap_pfn_range).  However we would like
2744 	 * consistency in testing and feature parity among all, so we should
2745 	 * try to keep these invariants in place for everybody.
2746 	 */
2747 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2748 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2749 						(VM_PFNMAP|VM_MIXEDMAP));
2750 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2751 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2752 
2753 	if (addr < vma->vm_start || addr >= vma->vm_end)
2754 		return VM_FAULT_SIGBUS;
2755 
2756 	if (!pfn_modify_allowed(pfn, pgprot))
2757 		return VM_FAULT_SIGBUS;
2758 
2759 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
2760 
2761 	return insert_pfn(vma, addr, pfn, pgprot, false);
2762 }
2763 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2764 
2765 /**
2766  * vmf_insert_pfn - insert single pfn into user vma
2767  * @vma: user vma to map to
2768  * @addr: target user address of this page
2769  * @pfn: source kernel pfn
2770  *
2771  * Similar to vm_insert_page, this allows drivers to insert individual pages
2772  * they've allocated into a user vma. Same comments apply.
2773  *
2774  * This function should only be called from a vm_ops->fault handler, and
2775  * in that case the handler should return the result of this function.
2776  *
2777  * vma cannot be a COW mapping.
2778  *
2779  * As this is called only for pages that do not currently exist, we
2780  * do not need to flush old virtual caches or the TLB.
2781  *
2782  * Context: Process context.  May allocate using %GFP_KERNEL.
2783  * Return: vm_fault_t value.
2784  */
2785 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2786 			unsigned long pfn)
2787 {
2788 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2789 }
2790 EXPORT_SYMBOL(vmf_insert_pfn);
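
/*
 * Illustrative sketch of a fault handler built on vmf_insert_pfn(); the
 * base pfn 'my_base_pfn' is hypothetical driver state:
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		return vmf_insert_pfn(vmf->vma, vmf->address,
 *				      my_base_pfn + vmf->pgoff);
 *	}
 */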
2791 
2792 static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn,
2793 			bool mkwrite)
2794 {
2795 	if (unlikely(is_zero_pfn(pfn)) &&
2796 	    (mkwrite || !vm_mixed_zeropage_allowed(vma)))
2797 		return false;
2798 	/* these checks mirror the abort conditions in vm_normal_page */
2799 	if (vma->vm_flags & VM_MIXEDMAP)
2800 		return true;
2801 	if (is_zero_pfn(pfn))
2802 		return true;
2803 	return false;
2804 }
2805 
2806 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2807 		unsigned long addr, unsigned long pfn, bool mkwrite)
2808 {
2809 	pgprot_t pgprot = vma->vm_page_prot;
2810 	int err;
2811 
2812 	if (!vm_mixed_ok(vma, pfn, mkwrite))
2813 		return VM_FAULT_SIGBUS;
2814 
2815 	if (addr < vma->vm_start || addr >= vma->vm_end)
2816 		return VM_FAULT_SIGBUS;
2817 
2818 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
2819 
2820 	if (!pfn_modify_allowed(pfn, pgprot))
2821 		return VM_FAULT_SIGBUS;
2822 
2823 	/*
2824 	 * If we don't have pte special, then we have to use the pfn_valid()
2825 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2826 	 * refcount the page if pfn_valid is true (hence insert_page rather
2827 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would then be refcounted as a normal page.
2829 	 */
2830 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_valid(pfn)) {
2831 		struct page *page;
2832 
2833 		/*
2834 		 * At this point we are committed to insert_page()
2835 		 * regardless of whether the caller specified flags that
2836 		 * result in pfn_t_has_page() == false.
2837 		 */
2838 		page = pfn_to_page(pfn);
2839 		err = insert_page(vma, addr, page, pgprot, mkwrite);
2840 	} else {
2841 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2842 	}
2843 
2844 	if (err == -ENOMEM)
2845 		return VM_FAULT_OOM;
2846 	if (err < 0 && err != -EBUSY)
2847 		return VM_FAULT_SIGBUS;
2848 
2849 	return VM_FAULT_NOPAGE;
2850 }
2851 
2852 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
2853 			bool write)
2854 {
2855 	pgprot_t pgprot = vmf->vma->vm_page_prot;
2856 	unsigned long addr = vmf->address;
2857 	int err;
2858 
2859 	if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end)
2860 		return VM_FAULT_SIGBUS;
2861 
2862 	err = insert_page(vmf->vma, addr, page, pgprot, write);
2863 	if (err == -ENOMEM)
2864 		return VM_FAULT_OOM;
2865 	if (err < 0 && err != -EBUSY)
2866 		return VM_FAULT_SIGBUS;
2867 
2868 	return VM_FAULT_NOPAGE;
2869 }
2870 EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite);
2871 
2872 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2873 		unsigned long pfn)
2874 {
2875 	return __vm_insert_mixed(vma, addr, pfn, false);
2876 }
2877 EXPORT_SYMBOL(vmf_insert_mixed);
2878 
2879 /*
 *  If the insertion of the PTE failed because someone else already added a
 *  different entry in the meantime, we treat that as success, as we assume
 *  the same entry was actually inserted.
2883  */
2884 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2885 		unsigned long addr, unsigned long pfn)
2886 {
2887 	return __vm_insert_mixed(vma, addr, pfn, true);
2888 }
2889 
2890 /*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
2894  */
2895 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2896 			unsigned long addr, unsigned long end,
2897 			unsigned long pfn, pgprot_t prot)
2898 {
2899 	pte_t *pte, *mapped_pte;
2900 	spinlock_t *ptl;
2901 	int err = 0;
2902 
2903 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2904 	if (!pte)
2905 		return -ENOMEM;
2906 	lazy_mmu_mode_enable();
2907 	do {
2908 		BUG_ON(!pte_none(ptep_get(pte)));
2909 		if (!pfn_modify_allowed(pfn, prot)) {
2910 			err = -EACCES;
2911 			break;
2912 		}
2913 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2914 		pfn++;
2915 	} while (pte++, addr += PAGE_SIZE, addr != end);
2916 	lazy_mmu_mode_disable();
2917 	pte_unmap_unlock(mapped_pte, ptl);
2918 	return err;
2919 }
2920 
2921 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2922 			unsigned long addr, unsigned long end,
2923 			unsigned long pfn, pgprot_t prot)
2924 {
2925 	pmd_t *pmd;
2926 	unsigned long next;
2927 	int err;
2928 
2929 	pfn -= addr >> PAGE_SHIFT;
2930 	pmd = pmd_alloc(mm, pud, addr);
2931 	if (!pmd)
2932 		return -ENOMEM;
2933 	VM_BUG_ON(pmd_trans_huge(*pmd));
2934 	do {
2935 		next = pmd_addr_end(addr, end);
2936 		err = remap_pte_range(mm, pmd, addr, next,
2937 				pfn + (addr >> PAGE_SHIFT), prot);
2938 		if (err)
2939 			return err;
2940 	} while (pmd++, addr = next, addr != end);
2941 	return 0;
2942 }
2943 
2944 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2945 			unsigned long addr, unsigned long end,
2946 			unsigned long pfn, pgprot_t prot)
2947 {
2948 	pud_t *pud;
2949 	unsigned long next;
2950 	int err;
2951 
2952 	pfn -= addr >> PAGE_SHIFT;
2953 	pud = pud_alloc(mm, p4d, addr);
2954 	if (!pud)
2955 		return -ENOMEM;
2956 	do {
2957 		next = pud_addr_end(addr, end);
2958 		err = remap_pmd_range(mm, pud, addr, next,
2959 				pfn + (addr >> PAGE_SHIFT), prot);
2960 		if (err)
2961 			return err;
2962 	} while (pud++, addr = next, addr != end);
2963 	return 0;
2964 }
2965 
2966 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2967 			unsigned long addr, unsigned long end,
2968 			unsigned long pfn, pgprot_t prot)
2969 {
2970 	p4d_t *p4d;
2971 	unsigned long next;
2972 	int err;
2973 
2974 	pfn -= addr >> PAGE_SHIFT;
2975 	p4d = p4d_alloc(mm, pgd, addr);
2976 	if (!p4d)
2977 		return -ENOMEM;
2978 	do {
2979 		next = p4d_addr_end(addr, end);
2980 		err = remap_pud_range(mm, p4d, addr, next,
2981 				pfn + (addr >> PAGE_SHIFT), prot);
2982 		if (err)
2983 			return err;
2984 	} while (p4d++, addr = next, addr != end);
2985 	return 0;
2986 }
2987 
2988 static int get_remap_pgoff(bool is_cow, unsigned long addr,
2989 		unsigned long end, unsigned long vm_start, unsigned long vm_end,
2990 		unsigned long pfn, pgoff_t *vm_pgoff_p)
2991 {
2992 	/*
2993 	 * There's a horrible special case to handle copy-on-write
2994 	 * behaviour that some programs depend on. We mark the "original"
2995 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2996 	 * See vm_normal_page() for details.
2997 	 */
2998 	if (is_cow) {
2999 		if (addr != vm_start || end != vm_end)
3000 			return -EINVAL;
3001 		*vm_pgoff_p = pfn;
3002 	}
3003 
3004 	return 0;
3005 }
3006 
3007 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
3008 		unsigned long pfn, unsigned long size, pgprot_t prot)
3009 {
3010 	pgd_t *pgd;
3011 	unsigned long next;
3012 	unsigned long end = addr + PAGE_ALIGN(size);
3013 	struct mm_struct *mm = vma->vm_mm;
3014 	int err;
3015 
3016 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
3017 		return -EINVAL;
3018 
3019 	VM_WARN_ON_ONCE(!vma_test_all_mask(vma, VMA_REMAP_FLAGS));
3020 
3021 	BUG_ON(addr >= end);
3022 	pfn -= addr >> PAGE_SHIFT;
3023 	pgd = pgd_offset(mm, addr);
3024 	flush_cache_range(vma, addr, end);
3025 	do {
3026 		next = pgd_addr_end(addr, end);
3027 		err = remap_p4d_range(mm, pgd, addr, next,
3028 				pfn + (addr >> PAGE_SHIFT), prot);
3029 		if (err)
3030 			return err;
3031 	} while (pgd++, addr = next, addr != end);
3032 
3033 	return 0;
3034 }
3035 
3036 /*
 * Variant of remap_pfn_range that does not perform pfnmap tracking (see
 * pfnmap_track()).  The caller must have pre-validated the caching bits
 * of the pgprot_t.
3039  */
3040 static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3041 		unsigned long pfn, unsigned long size, pgprot_t prot)
3042 {
3043 	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
3044 
3045 	if (!error)
3046 		return 0;
3047 
3048 	/*
3049 	 * A partial pfn range mapping is dangerous: it does not
3050 	 * maintain page reference counts, and callers may free
3051 	 * pages due to the error. So zap it early.
3052 	 */
3053 	zap_vma_range(vma, addr, size);
3054 	return error;
3055 }
3056 
3057 #ifdef __HAVE_PFNMAP_TRACKING
3058 static inline struct pfnmap_track_ctx *pfnmap_track_ctx_alloc(unsigned long pfn,
3059 		unsigned long size, pgprot_t *prot)
3060 {
3061 	struct pfnmap_track_ctx *ctx;
3062 
3063 	if (pfnmap_track(pfn, size, prot))
3064 		return ERR_PTR(-EINVAL);
3065 
3066 	ctx = kmalloc_obj(*ctx);
3067 	if (unlikely(!ctx)) {
3068 		pfnmap_untrack(pfn, size);
3069 		return ERR_PTR(-ENOMEM);
3070 	}
3071 
3072 	ctx->pfn = pfn;
3073 	ctx->size = size;
3074 	kref_init(&ctx->kref);
3075 	return ctx;
3076 }
3077 
3078 void pfnmap_track_ctx_release(struct kref *ref)
3079 {
3080 	struct pfnmap_track_ctx *ctx = container_of(ref, struct pfnmap_track_ctx, kref);
3081 
3082 	pfnmap_untrack(ctx->pfn, ctx->size);
3083 	kfree(ctx);
3084 }
3085 
3086 static int remap_pfn_range_track(struct vm_area_struct *vma, unsigned long addr,
3087 		unsigned long pfn, unsigned long size, pgprot_t prot)
3088 {
3089 	struct pfnmap_track_ctx *ctx = NULL;
3090 	int err;
3091 
3092 	size = PAGE_ALIGN(size);
3093 
3094 	/*
3095 	 * If we cover the full VMA, we'll perform actual tracking, and
3096 	 * remember to untrack when the last reference to our tracking
3097 	 * context from a VMA goes away. We'll keep tracking the whole pfn
3098 	 * range even during VMA splits and partial unmapping.
3099 	 *
3100 	 * If we only cover parts of the VMA, we'll only setup the cachemode
3101 	 * in the pgprot for the pfn range.
3102 	 */
3103 	if (addr == vma->vm_start && addr + size == vma->vm_end) {
3104 		if (vma->pfnmap_track_ctx)
3105 			return -EINVAL;
3106 		ctx = pfnmap_track_ctx_alloc(pfn, size, &prot);
3107 		if (IS_ERR(ctx))
3108 			return PTR_ERR(ctx);
3109 	} else if (pfnmap_setup_cachemode(pfn, size, &prot)) {
3110 		return -EINVAL;
3111 	}
3112 
3113 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
3114 	if (ctx) {
3115 		if (err)
3116 			kref_put(&ctx->kref, pfnmap_track_ctx_release);
3117 		else
3118 			vma->pfnmap_track_ctx = ctx;
3119 	}
3120 	return err;
3121 }
3122 
3123 static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
3124 		unsigned long pfn, unsigned long size, pgprot_t prot)
3125 {
3126 	return remap_pfn_range_track(vma, addr, pfn, size, prot);
3127 }
3128 #else
3129 static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
3130 		unsigned long pfn, unsigned long size, pgprot_t prot)
3131 {
3132 	return remap_pfn_range_notrack(vma, addr, pfn, size, prot);
3133 }
3134 #endif
3135 
3136 int remap_pfn_range_prepare(struct vm_area_desc *desc)
3137 {
3138 	const struct mmap_action *action = &desc->action;
3139 	const unsigned long start = action->remap.start;
3140 	const unsigned long end = start + action->remap.size;
3141 	const unsigned long pfn = action->remap.start_pfn;
3142 	const bool is_cow = vma_desc_is_cow_mapping(desc);
3143 	int err;
3144 
3145 	err = get_remap_pgoff(is_cow, start, end, desc->start, desc->end, pfn,
3146 			      &desc->pgoff);
3147 	if (err)
3148 		return err;
3149 
3150 	vma_desc_set_flags_mask(desc, VMA_REMAP_FLAGS);
3151 	return 0;
3152 }
3153 
3154 static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma,
3155 				       unsigned long addr, unsigned long pfn,
3156 				       unsigned long size)
3157 {
3158 	const unsigned long end = addr + PAGE_ALIGN(size);
3159 	const bool is_cow = is_cow_mapping(vma->vm_flags);
3160 	int err;
3161 
3162 	err = get_remap_pgoff(is_cow, addr, end, vma->vm_start, vma->vm_end,
3163 			      pfn, &vma->vm_pgoff);
3164 	if (err)
3165 		return err;
3166 
3167 	vma_set_flags_mask(vma, VMA_REMAP_FLAGS);
3168 	return 0;
3169 }
3170 
3171 /**
3172  * remap_pfn_range - remap kernel memory to userspace
3173  * @vma: user vma to map to
3174  * @addr: target page aligned user address to start at
3175  * @pfn: page frame number of kernel physical memory address
3176  * @size: size of mapping area
3177  * @prot: page protection flags for this mapping
3178  *
3179  * Note: this is only safe if the mm semaphore is held when called.
3180  *
3181  * Return: %0 on success, negative error code otherwise.
3182  */
3183 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
3184 		    unsigned long pfn, unsigned long size, pgprot_t prot)
3185 {
3186 	int err;
3187 
3188 	err = remap_pfn_range_prepare_vma(vma, addr, pfn, size);
3189 	if (err)
3190 		return err;
3191 
3192 	return do_remap_pfn_range(vma, addr, pfn, size, prot);
3193 }
3194 EXPORT_SYMBOL(remap_pfn_range);
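
/*
 * Illustrative sketch of the classic use from an mmap handler, remapping a
 * device region whose physical base 'my_phys_base' is hypothetical:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       my_phys_base >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */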
3195 
3196 int remap_pfn_range_complete(struct vm_area_struct *vma,
3197 			     struct mmap_action *action)
3198 {
3199 	const unsigned long start = action->remap.start;
3200 	const unsigned long pfn = action->remap.start_pfn;
3201 	const unsigned long size = action->remap.size;
3202 	const pgprot_t prot = action->remap.pgprot;
3203 
3204 	return do_remap_pfn_range(vma, start, pfn, size, prot);
3205 }
3206 
3207 static int __simple_ioremap_prep(unsigned long vm_len, pgoff_t vm_pgoff,
3208 				 phys_addr_t start_phys, unsigned long size,
3209 				 unsigned long *pfnp)
3210 {
3211 	unsigned long pfn, pages;
3212 
3213 	/* Check that the physical memory area passed in looks valid */
3214 	if (start_phys + size < start_phys)
3215 		return -EINVAL;
3216 	/*
3217 	 * You *really* shouldn't map things that aren't page-aligned,
3218 	 * but we've historically allowed it because IO memory might
3219 	 * just have smaller alignment.
3220 	 */
3221 	size += start_phys & ~PAGE_MASK;
3222 	pfn = start_phys >> PAGE_SHIFT;
3223 	pages = (size + ~PAGE_MASK) >> PAGE_SHIFT;
3224 	if (pfn + pages < pfn)
3225 		return -EINVAL;
3226 
3227 	/* We start the mapping 'vm_pgoff' pages into the area */
3228 	if (vm_pgoff > pages)
3229 		return -EINVAL;
3230 	pfn += vm_pgoff;
3231 	pages -= vm_pgoff;
3232 
3233 	/* Can we fit all of the mapping? */
3234 	if ((vm_len >> PAGE_SHIFT) > pages)
3235 		return -EINVAL;
3236 
3237 	*pfnp = pfn;
3238 	return 0;
3239 }
3240 
3241 int simple_ioremap_prepare(struct vm_area_desc *desc)
3242 {
3243 	struct mmap_action *action = &desc->action;
3244 	const phys_addr_t start = action->simple_ioremap.start_phys_addr;
3245 	const unsigned long size = action->simple_ioremap.size;
3246 	unsigned long pfn;
3247 	int err;
3248 
3249 	err = __simple_ioremap_prep(vma_desc_size(desc), desc->pgoff,
3250 				    start, size, &pfn);
3251 	if (err)
3252 		return err;
3253 
3254 	/* The I/O remap logic does the heavy lifting. */
3255 	mmap_action_ioremap_full(desc, pfn);
3256 	return io_remap_pfn_range_prepare(desc);
3257 }
3258 
3259 /**
3260  * vm_iomap_memory - remap memory to userspace
3261  * @vma: user vma to map to
3262  * @start: start of the physical memory to be mapped
3263  * @len: size of area
3264  *
3265  * This is a simplified io_remap_pfn_range() for common driver use. The
3266  * driver just needs to give us the physical memory range to be mapped,
3267  * we'll figure out the rest from the vma information.
3268  *
 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set up
 * write-combining or similar.
3271  *
3272  * Return: %0 on success, negative error code otherwise.
3273  */
3274 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
3275 {
3276 	const unsigned long vm_start = vma->vm_start;
3277 	const unsigned long vm_end = vma->vm_end;
3278 	const unsigned long vm_len = vm_end - vm_start;
3279 	unsigned long pfn;
3280 	int err;
3281 
3282 	err = __simple_ioremap_prep(vm_len, vma->vm_pgoff, start, len, &pfn);
3283 	if (err)
3284 		return err;
3285 
3286 	/* Ok, let it rip */
3287 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
3288 }
3289 EXPORT_SYMBOL(vm_iomap_memory);
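
/*
 * Illustrative sketch: a PCI driver can hand a whole BAR to userspace
 * without computing pfns or sizes itself ('pdev' is assumed to be the
 * driver's pci_dev):
 *
 *	return vm_iomap_memory(vma, pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 */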
3290 
3291 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
3292 				     unsigned long addr, unsigned long end,
3293 				     pte_fn_t fn, void *data, bool create,
3294 				     pgtbl_mod_mask *mask)
3295 {
3296 	pte_t *pte, *mapped_pte;
3297 	int err = 0;
3298 	spinlock_t *ptl;
3299 
3300 	if (create) {
3301 		mapped_pte = pte = (mm == &init_mm) ?
3302 			pte_alloc_kernel_track(pmd, addr, mask) :
3303 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
3304 		if (!pte)
3305 			return -ENOMEM;
3306 	} else {
3307 		mapped_pte = pte = (mm == &init_mm) ?
3308 			pte_offset_kernel(pmd, addr) :
3309 			pte_offset_map_lock(mm, pmd, addr, &ptl);
3310 		if (!pte)
3311 			return -EINVAL;
3312 	}
3313 
3314 	lazy_mmu_mode_enable();
3315 
3316 	if (fn) {
3317 		do {
3318 			if (create || !pte_none(ptep_get(pte))) {
3319 				err = fn(pte, addr, data);
3320 				if (err)
3321 					break;
3322 			}
3323 		} while (pte++, addr += PAGE_SIZE, addr != end);
3324 	}
3325 	*mask |= PGTBL_PTE_MODIFIED;
3326 
3327 	lazy_mmu_mode_disable();
3328 
3329 	if (mm != &init_mm)
3330 		pte_unmap_unlock(mapped_pte, ptl);
3331 	return err;
3332 }
3333 
3334 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
3335 				     unsigned long addr, unsigned long end,
3336 				     pte_fn_t fn, void *data, bool create,
3337 				     pgtbl_mod_mask *mask)
3338 {
3339 	pmd_t *pmd;
3340 	unsigned long next;
3341 	int err = 0;
3342 
3343 	BUG_ON(pud_leaf(*pud));
3344 
3345 	if (create) {
3346 		pmd = pmd_alloc_track(mm, pud, addr, mask);
3347 		if (!pmd)
3348 			return -ENOMEM;
3349 	} else {
3350 		pmd = pmd_offset(pud, addr);
3351 	}
3352 	do {
3353 		next = pmd_addr_end(addr, end);
3354 		if (pmd_none(*pmd) && !create)
3355 			continue;
3356 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
3357 			return -EINVAL;
3358 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
3359 			if (!create)
3360 				continue;
3361 			pmd_clear_bad(pmd);
3362 		}
3363 		err = apply_to_pte_range(mm, pmd, addr, next,
3364 					 fn, data, create, mask);
3365 		if (err)
3366 			break;
3367 	} while (pmd++, addr = next, addr != end);
3368 
3369 	return err;
3370 }
3371 
3372 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
3373 				     unsigned long addr, unsigned long end,
3374 				     pte_fn_t fn, void *data, bool create,
3375 				     pgtbl_mod_mask *mask)
3376 {
3377 	pud_t *pud;
3378 	unsigned long next;
3379 	int err = 0;
3380 
3381 	if (create) {
3382 		pud = pud_alloc_track(mm, p4d, addr, mask);
3383 		if (!pud)
3384 			return -ENOMEM;
3385 	} else {
3386 		pud = pud_offset(p4d, addr);
3387 	}
3388 	do {
3389 		next = pud_addr_end(addr, end);
3390 		if (pud_none(*pud) && !create)
3391 			continue;
3392 		if (WARN_ON_ONCE(pud_leaf(*pud)))
3393 			return -EINVAL;
3394 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
3395 			if (!create)
3396 				continue;
3397 			pud_clear_bad(pud);
3398 		}
3399 		err = apply_to_pmd_range(mm, pud, addr, next,
3400 					 fn, data, create, mask);
3401 		if (err)
3402 			break;
3403 	} while (pud++, addr = next, addr != end);
3404 
3405 	return err;
3406 }
3407 
3408 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
3409 				     unsigned long addr, unsigned long end,
3410 				     pte_fn_t fn, void *data, bool create,
3411 				     pgtbl_mod_mask *mask)
3412 {
3413 	p4d_t *p4d;
3414 	unsigned long next;
3415 	int err = 0;
3416 
3417 	if (create) {
3418 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
3419 		if (!p4d)
3420 			return -ENOMEM;
3421 	} else {
3422 		p4d = p4d_offset(pgd, addr);
3423 	}
3424 	do {
3425 		next = p4d_addr_end(addr, end);
3426 		if (p4d_none(*p4d) && !create)
3427 			continue;
3428 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
3429 			return -EINVAL;
3430 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
3431 			if (!create)
3432 				continue;
3433 			p4d_clear_bad(p4d);
3434 		}
3435 		err = apply_to_pud_range(mm, p4d, addr, next,
3436 					 fn, data, create, mask);
3437 		if (err)
3438 			break;
3439 	} while (p4d++, addr = next, addr != end);
3440 
3441 	return err;
3442 }
3443 
3444 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3445 				 unsigned long size, pte_fn_t fn,
3446 				 void *data, bool create)
3447 {
3448 	pgd_t *pgd;
3449 	unsigned long start = addr, next;
3450 	unsigned long end = addr + size;
3451 	pgtbl_mod_mask mask = 0;
3452 	int err = 0;
3453 
3454 	if (WARN_ON(addr >= end))
3455 		return -EINVAL;
3456 
3457 	pgd = pgd_offset(mm, addr);
3458 	do {
3459 		next = pgd_addr_end(addr, end);
3460 		if (pgd_none(*pgd) && !create)
3461 			continue;
3462 		if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
3463 			err = -EINVAL;
3464 			break;
3465 		}
3466 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
3467 			if (!create)
3468 				continue;
3469 			pgd_clear_bad(pgd);
3470 		}
3471 		err = apply_to_p4d_range(mm, pgd, addr, next,
3472 					 fn, data, create, &mask);
3473 		if (err)
3474 			break;
3475 	} while (pgd++, addr = next, addr != end);
3476 
3477 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
3478 		arch_sync_kernel_mappings(start, start + size);
3479 
3480 	return err;
3481 }
3482 
3483 /*
3484  * Scan a region of virtual memory, filling in page tables as necessary
3485  * and calling a provided function on each leaf page table.
3486  */
3487 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3488 			unsigned long size, pte_fn_t fn, void *data)
3489 {
3490 	return __apply_to_page_range(mm, addr, size, fn, data, true);
3491 }
3492 EXPORT_SYMBOL_GPL(apply_to_page_range);
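
/*
 * Example (illustrative sketch, not kernel code): populate a kernel virtual
 * range with pre-allocated pages via a pte_fn_t callback, which is invoked
 * once per pte slot with the page table lock held. The struct fill_ctx is
 * hypothetical:
 *
 *	struct fill_ctx {
 *		struct page **pages;
 *		unsigned long idx;
 *	};
 *
 *	static int fill_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		struct fill_ctx *ctx = data;
 *
 *		set_pte_at(&init_mm, addr, pte,
 *			   mk_pte(ctx->pages[ctx->idx++], PAGE_KERNEL));
 *		return 0;
 *	}
 *
 *	err = apply_to_page_range(&init_mm, addr, nr_pages << PAGE_SHIFT,
 *				  fill_pte, &ctx);
 */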
3493 
3494 /*
3495  * Scan a region of virtual memory, calling a provided function on
3496  * each leaf page table where it exists.
3497  *
3498  * Unlike apply_to_page_range, this does _not_ fill in page tables
3499  * where they are absent.
3500  */
3501 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
3502 				 unsigned long size, pte_fn_t fn, void *data)
3503 {
3504 	return __apply_to_page_range(mm, addr, size, fn, data, false);
3505 }
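
/*
 * Example (illustrative sketch, not kernel code): since this variant never
 * allocates page tables and skips pte_none() entries, it suits read-only
 * walks, e.g. counting the ptes currently populated in a kernel range:
 *
 *	static int count_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		(*(unsigned long *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr = 0;
 *	apply_to_existing_page_range(&init_mm, addr, size, count_pte, &nr);
 */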
3506 
3507 /*
3508  * handle_pte_fault chooses page fault handler according to an entry which was
3509  * read non-atomically.  Before making any commitment, on those architectures
3510  * or configurations (e.g. i386 with PAE, where a torn read can mix the halves
3511  * of two ptes), do_swap_page must check under lock before unmapping the pte
3512  * and proceeding (but do_wp_page is only called after already making such a
3513  * check; and do_anonymous_page can safely check later on).
3514  */
3515 static inline int pte_unmap_same(struct vm_fault *vmf)
3516 {
3517 	int same = 1;
3518 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
3519 	if (sizeof(pte_t) > sizeof(unsigned long)) {
3520 		spin_lock(vmf->ptl);
3521 		same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
3522 		spin_unlock(vmf->ptl);
3523 	}
3524 #endif
3525 	pte_unmap(vmf->pte);
3526 	vmf->pte = NULL;
3527 	return same;
3528 }
3529 
3530 /*
3531  * Return:
3532  *	0:		copy succeeded
3533  *	-EHWPOISON:	copy failed due to hwpoison in source page
3534  *	-EAGAIN:	copy failed (some other reason)
3535  */
3536 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
3537 				      struct vm_fault *vmf)
3538 {
3539 	int ret;
3540 	void *kaddr;
3541 	void __user *uaddr;
3542 	struct vm_area_struct *vma = vmf->vma;
3543 	struct mm_struct *mm = vma->vm_mm;
3544 	unsigned long addr = vmf->address;
3545 
3546 	if (likely(src)) {
3547 		if (copy_mc_user_highpage(dst, src, addr, vma))
3548 			return -EHWPOISON;
3549 		return 0;
3550 	}
3551 
3552 	/*
3553 	 * If the source page was a PFN mapping, we don't have
3554 	 * a "struct page" for it. We do a best-effort copy by
3555 	 * just copying from the original user address. If that
3556 	 * fails, we just zero-fill it. Live with it.
3557 	 */
3558 	kaddr = kmap_local_page(dst);
3559 	pagefault_disable();
3560 	uaddr = (void __user *)(addr & PAGE_MASK);
3561 
3562 	/*
3563 	 * On architectures with software "accessed" bits, we would
3564 	 * take a double page fault, so mark it accessed here.
3565 	 */
3566 	vmf->pte = NULL;
3567 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
3568 		pte_t entry;
3569 
3570 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3571 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3572 			/*
3573 			 * Another thread has already handled the fault;
3574 			 * just update the local tlb.
3575 			 */
3576 			if (vmf->pte)
3577 				update_mmu_tlb(vma, addr, vmf->pte);
3578 			ret = -EAGAIN;
3579 			goto pte_unlock;
3580 		}
3581 
3582 		entry = pte_mkyoung(vmf->orig_pte);
3583 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3584 			update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3585 	}
3586 
3587 	/*
3588 	 * This really shouldn't fail, because the page is there
3589 	 * in the page tables. But it might just be unreadable,
3590 	 * in which case we just give up and fill the result with
3591 	 * zeroes.
3592 	 */
3593 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3594 		if (vmf->pte)
3595 			goto warn;
3596 
3597 		/* Re-validate under PTL if the page is still mapped */
3598 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3599 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3600 			/* The PTE changed under us, update local tlb */
3601 			if (vmf->pte)
3602 				update_mmu_tlb(vma, addr, vmf->pte);
3603 			ret = -EAGAIN;
3604 			goto pte_unlock;
3605 		}
3606 
3607 		/*
3608 		 * The same page may have been mapped back in since the last
3609 		 * copy attempt. Try to copy again under the PTL.
3610 		 */
3611 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3612 			/*
3613 			 * Warn in case some obscure use-case hits this
3614 			 * path.
3615 			 */
3616 warn:
3617 			WARN_ON_ONCE(1);
3618 			clear_page(kaddr);
3619 		}
3620 	}
3621 
3622 	ret = 0;
3623 
3624 pte_unlock:
3625 	if (vmf->pte)
3626 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3627 	pagefault_enable();
3628 	kunmap_local(kaddr);
3629 	flush_dcache_page(dst);
3630 
3631 	return ret;
3632 }
3633 
3634 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3635 {
3636 	struct file *vm_file = vma->vm_file;
3637 
3638 	if (vm_file)
3639 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3640 
3641 	/*
3642 	 * Special mappings (e.g. VDSO) do not have any file so fake
3643 	 * a default GFP_KERNEL for them.
3644 	 */
3645 	return GFP_KERNEL;
3646 }
3647 
3648 /*
3649  * Notify the address space that the page is about to become writable so that
3650  * it can prohibit this or wait for the page to get into an appropriate state.
3651  *
3652  * We do this without the lock held, so that it can sleep if it needs to.
3653  */
3654 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3655 {
3656 	vm_fault_t ret;
3657 	unsigned int old_flags = vmf->flags;
3658 
3659 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3660 
3661 	if (vmf->vma->vm_file &&
3662 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3663 		return VM_FAULT_SIGBUS;
3664 
3665 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3666 	/* Restore original flags so that caller is not surprised */
3667 	vmf->flags = old_flags;
3668 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3669 		return ret;
3670 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3671 		folio_lock(folio);
3672 		if (!folio->mapping) {
3673 			folio_unlock(folio);
3674 			return 0; /* retry */
3675 		}
3676 		ret |= VM_FAULT_LOCKED;
3677 	} else
3678 		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3679 	return ret;
3680 }
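
/*
 * Example (illustrative sketch, not kernel code): a minimal ->page_mkwrite()
 * implementation satisfying the contract above - return with the folio
 * locked and VM_FAULT_LOCKED set, or VM_FAULT_NOPAGE if the folio was
 * truncated while unlocked:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *
 *		folio_lock(folio);
 *		if (!folio->mapping) {
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		folio_mark_dirty(folio);
 *		return VM_FAULT_LOCKED;
 *	}
 */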
3681 
3682 /*
3683  * Handle dirtying of a page in shared file mapping on a write fault.
3684  *
3685  * The function expects the page to be locked and unlocks it.
3686  */
3687 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3688 {
3689 	struct vm_area_struct *vma = vmf->vma;
3690 	struct address_space *mapping;
3691 	struct folio *folio = page_folio(vmf->page);
3692 	bool dirtied;
3693 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3694 
3695 	dirtied = folio_mark_dirty(folio);
3696 	VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3697 	/*
3698 	 * Take a local copy of the address_space - folio.mapping may be zeroed
3699 	 * by truncate after folio_unlock().   The address_space itself remains
3700 	 * pinned by vma->vm_file's reference.  We rely on folio_unlock()'s
3701 	 * release semantics to prevent the compiler from undoing this copying.
3702 	 */
3703 	mapping = folio_raw_mapping(folio);
3704 	folio_unlock(folio);
3705 
3706 	if (!page_mkwrite)
3707 		file_update_time(vma->vm_file);
3708 
3709 	/*
3710 	 * Throttle page dirtying rate down to writeback speed.
3711 	 *
3712 	 * mapping may be NULL here because some device drivers do not
3713 	 * set page.mapping but still dirty their pages
3714 	 *
3715 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3716 	 * is pinning the mapping, as per above.
3717 	 */
3718 	if ((dirtied || page_mkwrite) && mapping) {
3719 		struct file *fpin;
3720 
3721 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3722 		balance_dirty_pages_ratelimited(mapping);
3723 		if (fpin) {
3724 			fput(fpin);
3725 			return VM_FAULT_COMPLETED;
3726 		}
3727 	}
3728 
3729 	return 0;
3730 }
3731 
3732 /*
3733  * Handle write page faults for pages that can be reused in the current vma
3734  *
3735  * This can happen either because the mapping has the VM_SHARED flag set,
3736  * or because we hold the last remaining reference to the page. In either
3737  * case, all we need to do here is to mark the page as writable and update
3738  * any related book-keeping.
3739  */
3740 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3741 	__releases(vmf->ptl)
3742 {
3743 	struct vm_area_struct *vma = vmf->vma;
3744 	pte_t entry;
3745 
3746 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3747 	VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
3748 
3749 	if (folio) {
3750 		VM_BUG_ON(folio_test_anon(folio) &&
3751 			  !PageAnonExclusive(vmf->page));
3752 		/*
3753 		 * Clear the folio's cpupid information as the existing
3754 		 * information potentially belongs to a now completely
3755 		 * unrelated process.
3756 		 */
3757 		folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3758 	}
3759 
3760 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3761 	entry = pte_mkyoung(vmf->orig_pte);
3762 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3763 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3764 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3765 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3766 	count_vm_event(PGREUSE);
3767 }
3768 
3769 /*
3770  * We could add a bitflag somewhere, but for now, we know that all
3771  * vm_ops that have a ->map_pages have been audited and don't need
3772  * the mmap_lock to be held.
3773  */
3774 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3775 {
3776 	struct vm_area_struct *vma = vmf->vma;
3777 
3778 	if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3779 		return 0;
3780 	vma_end_read(vma);
3781 	return VM_FAULT_RETRY;
3782 }
3783 
3784 /**
3785  * __vmf_anon_prepare - Prepare to handle an anonymous fault.
3786  * @vmf: The vm_fault descriptor passed from the fault handler.
3787  *
3788  * When preparing to insert an anonymous page into a VMA from a
3789  * fault handler, call this function rather than anon_vma_prepare().
3790  * If this vma does not already have an associated anon_vma and we are
3791  * only protected by the per-VMA lock, the caller must retry with the
3792  * mmap_lock held.  __anon_vma_prepare() will look at adjacent VMAs to
3793  * determine if this VMA can share its anon_vma, and that's not safe to
3794  * do with only the per-VMA lock held for this VMA.
3795  *
3796  * Return: 0 if fault handling can proceed.  Any other value should be
3797  * returned to the caller.
3798  */
3799 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
3800 {
3801 	struct vm_area_struct *vma = vmf->vma;
3802 	vm_fault_t ret = 0;
3803 
3804 	if (likely(vma->anon_vma))
3805 		return 0;
3806 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3807 		if (!mmap_read_trylock(vma->vm_mm))
3808 			return VM_FAULT_RETRY;
3809 	}
3810 	if (__anon_vma_prepare(vma))
3811 		ret = VM_FAULT_OOM;
3812 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3813 		mmap_read_unlock(vma->vm_mm);
3814 	return ret;
3815 }
3816 
3817 /*
3818  * Handle the case of a page which we actually need to copy to a new page,
3819  * either due to COW or unsharing.
3820  *
3821  * Called with mmap_lock locked and the old page referenced, but
3822  * without the ptl held.
3823  *
3824  * High level logic flow:
3825  *
3826  * - Allocate a page, copy the content of the old page to the new one.
3827  * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
3828  * - Take the PTL. If the pte changed, bail out and release the allocated page
3829  * - If the pte is still the way we remember it, update the page table and all
3830  *   relevant references. This includes dropping the reference the page-table
3831  *   held to the old page, as well as updating the rmap.
3832  * - In any case, unlock the PTL and drop the reference we took to the old page.
3833  */
3834 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3835 {
3836 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3837 	struct vm_area_struct *vma = vmf->vma;
3838 	struct mm_struct *mm = vma->vm_mm;
3839 	struct folio *old_folio = NULL;
3840 	struct folio *new_folio = NULL;
3841 	pte_t entry;
3842 	int page_copied = 0;
3843 	struct mmu_notifier_range range;
3844 	vm_fault_t ret;
3845 	bool pfn_is_zero;
3846 
3847 	delayacct_wpcopy_start();
3848 
3849 	if (vmf->page)
3850 		old_folio = page_folio(vmf->page);
3851 	ret = vmf_anon_prepare(vmf);
3852 	if (unlikely(ret))
3853 		goto out;
3854 
3855 	pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3856 	new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3857 	if (!new_folio)
3858 		goto oom;
3859 
3860 	if (!pfn_is_zero) {
3861 		int err;
3862 
3863 		err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3864 		if (err) {
3865 			/*
3866 			 * COW failed; if the fault was resolved by another
3867 			 * thread, that's fine. If not, userspace will
3868 			 * re-fault at the same address and we will handle
3869 			 * the fault on the second attempt.
3870 			 * The -EHWPOISON case will not be retried.
3871 			 */
3872 			folio_put(new_folio);
3873 			if (old_folio)
3874 				folio_put(old_folio);
3875 
3876 			delayacct_wpcopy_end();
3877 			return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3878 		}
3879 		kmsan_copy_page_meta(&new_folio->page, vmf->page);
3880 	}
3881 
3882 	__folio_mark_uptodate(new_folio);
3883 
3884 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3885 				vmf->address & PAGE_MASK,
3886 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3887 	mmu_notifier_invalidate_range_start(&range);
3888 
3889 	/*
3890 	 * Re-check the pte - we dropped the lock
3891 	 */
3892 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3893 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3894 		if (old_folio) {
3895 			if (!folio_test_anon(old_folio)) {
3896 				dec_mm_counter(mm, mm_counter_file(old_folio));
3897 				inc_mm_counter(mm, MM_ANONPAGES);
3898 			}
3899 		} else {
3900 			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3901 			inc_mm_counter(mm, MM_ANONPAGES);
3902 		}
3903 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3904 		entry = folio_mk_pte(new_folio, vma->vm_page_prot);
3905 		entry = pte_sw_mkyoung(entry);
3906 		if (unlikely(unshare)) {
3907 			if (pte_soft_dirty(vmf->orig_pte))
3908 				entry = pte_mksoft_dirty(entry);
3909 			if (pte_uffd_wp(vmf->orig_pte))
3910 				entry = pte_mkuffd_wp(entry);
3911 		} else {
3912 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3913 		}
3914 
3915 		/*
3916 		 * Clear the pte entry and flush it first, before updating the
3917 		 * pte with the new entry, to keep TLBs on different CPUs in
3918 		 * sync. This code used to set the new PTE then flush TLBs, but
3919 		 * that left a window where the new PTE could be loaded into
3920 		 * some TLBs while the old PTE remains in others.
3921 		 */
3922 		ptep_clear_flush(vma, vmf->address, vmf->pte);
3923 		folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
3924 		folio_add_lru_vma(new_folio, vma);
3925 		BUG_ON(unshare && pte_write(entry));
3926 		set_pte_at(mm, vmf->address, vmf->pte, entry);
3927 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3928 		if (old_folio) {
3929 			/*
3930 			 * Only after switching the pte to the new page may
3931 			 * we remove the mapcount here. Otherwise another
3932 			 * process may come and find the rmap count decremented
3933 			 * before the pte is switched to the new page, and
3934 			 * "reuse" the old page writing into it while our pte
3935 			 * here still points into it and can be read by other
3936 			 * threads.
3937 			 *
3938 			 * The critical issue is to order this
3939 			 * folio_remove_rmap_pte() with the ptep_clear_flush
3940 			 * above. Those stores are ordered by (if nothing else)
3941 			 * the barrier present in the atomic_add_negative
3942 			 * in folio_remove_rmap_pte().
3943 			 *
3944 			 * Then the TLB flush in ptep_clear_flush ensures that
3945 			 * no process can access the old page before the
3946 			 * decremented mapcount is visible. And the old page
3947 			 * cannot be reused until after the decremented
3948 			 * mapcount is visible. So transitively, TLBs to
3949 			 * old page will be flushed before it can be reused.
3950 			 */
3951 			folio_remove_rmap_pte(old_folio, vmf->page, vma);
3952 		}
3953 
3954 		/* Free the old page.. */
3955 		new_folio = old_folio;
3956 		page_copied = 1;
3957 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3958 	} else if (vmf->pte) {
3959 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3960 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3961 	}
3962 
3963 	mmu_notifier_invalidate_range_end(&range);
3964 
3965 	if (new_folio)
3966 		folio_put(new_folio);
3967 	if (old_folio) {
3968 		if (page_copied)
3969 			free_swap_cache(old_folio);
3970 		folio_put(old_folio);
3971 	}
3972 
3973 	delayacct_wpcopy_end();
3974 	return 0;
3975 oom:
3976 	ret = VM_FAULT_OOM;
3977 out:
3978 	if (old_folio)
3979 		folio_put(old_folio);
3980 
3981 	delayacct_wpcopy_end();
3982 	return ret;
3983 }
3984 
3985 /**
3986  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3987  *			  writeable once the page is prepared
3988  *
3989  * @vmf: structure describing the fault
3990  * @folio: the folio of vmf->page
3991  *
3992  * This function handles all that is needed to finish a write page fault in a
3993  * shared mapping due to PTE being read-only once the mapped page is prepared.
3994  * It handles locking of PTE and modifying it.
3995  *
3996  * The function expects the page to be locked or other protection against
3997  * concurrent faults / writeback (such as DAX radix tree locks).
3998  *
3999  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
4000  * we acquired PTE lock.
4001  */
4002 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
4003 {
4004 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
4005 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
4006 				       &vmf->ptl);
4007 	if (!vmf->pte)
4008 		return VM_FAULT_NOPAGE;
4009 	/*
4010 	 * We might have raced with another page fault while we released the
4011 	 * pte_offset_map_lock.
4012 	 */
4013 	if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
4014 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4015 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4016 		return VM_FAULT_NOPAGE;
4017 	}
4018 	wp_page_reuse(vmf, folio);
4019 	return 0;
4020 }
4021 
4022 /*
4023  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
4024  * mapping
4025  */
4026 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
4027 {
4028 	struct vm_area_struct *vma = vmf->vma;
4029 
4030 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
4031 		vm_fault_t ret;
4032 
4033 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4034 		ret = vmf_can_call_fault(vmf);
4035 		if (ret)
4036 			return ret;
4037 
4038 		vmf->flags |= FAULT_FLAG_MKWRITE;
4039 		ret = vma->vm_ops->pfn_mkwrite(vmf);
4040 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
4041 			return ret;
4042 		return finish_mkwrite_fault(vmf, NULL);
4043 	}
4044 	wp_page_reuse(vmf, NULL);
4045 	return 0;
4046 }
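
/*
 * Example (illustrative sketch, not kernel code): a ->pfn_mkwrite() handler
 * that only does driver-side bookkeeping and returns 0, letting the core
 * proceed to finish_mkwrite_fault() and mark the pte writable. The struct
 * mydev and its counter are hypothetical:
 *
 *	static vm_fault_t mydev_pfn_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct mydev *dev = vmf->vma->vm_private_data;
 *
 *		atomic_inc(&dev->writable_faults);
 *		return 0;
 *	}
 */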
4047 
4048 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
4049 	__releases(vmf->ptl)
4050 {
4051 	struct vm_area_struct *vma = vmf->vma;
4052 	vm_fault_t ret = 0;
4053 
4054 	folio_get(folio);
4055 
4056 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
4057 		vm_fault_t tmp;
4058 
4059 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4060 		tmp = vmf_can_call_fault(vmf);
4061 		if (tmp) {
4062 			folio_put(folio);
4063 			return tmp;
4064 		}
4065 
4066 		tmp = do_page_mkwrite(vmf, folio);
4067 		if (unlikely(!tmp || (tmp &
4068 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4069 			folio_put(folio);
4070 			return tmp;
4071 		}
4072 		tmp = finish_mkwrite_fault(vmf, folio);
4073 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
4074 			folio_unlock(folio);
4075 			folio_put(folio);
4076 			return tmp;
4077 		}
4078 	} else {
4079 		wp_page_reuse(vmf, folio);
4080 		folio_lock(folio);
4081 	}
4082 	ret |= fault_dirty_shared_page(vmf);
4083 	folio_put(folio);
4084 
4085 	return ret;
4086 }
4087 
4088 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4089 static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
4090 		struct vm_area_struct *vma)
4091 {
4092 	bool exclusive = false;
4093 
4094 	/* Let's just free up a large folio if only a single page is mapped. */
4095 	if (folio_large_mapcount(folio) <= 1)
4096 		return false;
4097 
4098 	/*
4099 	 * The assumption for anonymous folios is that each page can only get
4100 	 * mapped once into each MM. The only exceptions are KSM folios, which
4101 	 * are always small.
4102 	 *
4103 	 * Each taken mapcount must be paired with exactly one taken reference,
4104 	 * whereby the refcount must be incremented before the mapcount when
4105 	 * mapping a page, and the refcount must be decremented after the
4106 	 * mapcount when unmapping a page.
4107 	 *
4108 	 * If all folio references are from mappings, and all mappings are in
4109 	 * the page tables of this MM, then this folio is exclusive to this MM.
4110 	 */
4111 	if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
4112 		return false;
4113 
4114 	VM_WARN_ON_ONCE(folio_test_ksm(folio));
4115 
4116 	if (unlikely(folio_test_swapcache(folio))) {
4117 		/*
4118 		 * Note: freeing up the swapcache will fail if some PTEs are
4119 		 * still swap entries.
4120 		 */
4121 		if (!folio_trylock(folio))
4122 			return false;
4123 		folio_free_swap(folio);
4124 		folio_unlock(folio);
4125 	}
4126 
4127 	if (folio_large_mapcount(folio) != folio_ref_count(folio))
4128 		return false;
4129 
4130 	/* Stabilize the mapcount vs. refcount and recheck. */
4131 	folio_lock_large_mapcount(folio);
4132 	VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_ref_count(folio), folio);
4133 
4134 	if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
4135 		goto unlock;
4136 	if (folio_large_mapcount(folio) != folio_ref_count(folio))
4137 		goto unlock;
4138 
4139 	VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_nr_pages(folio), folio);
4140 	VM_WARN_ON_ONCE_FOLIO(folio_entire_mapcount(folio), folio);
4141 	VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id &&
4142 			folio_mm_id(folio, 1) != vma->vm_mm->mm_id);
4143 
4144 	/*
4145 	 * Do we need the folio lock? Likely not. If there had been
4146 	 * references from page migration/swapout, we would have detected
4147 	 * an additional folio reference and never ended up here.
4148 	 */
4149 	exclusive = true;
4150 unlock:
4151 	folio_unlock_large_mapcount(folio);
4152 	return exclusive;
4153 }
4154 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4155 static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
4156 		struct vm_area_struct *vma)
4157 {
4158 	BUILD_BUG();
4159 }
4160 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4161 
4162 static bool wp_can_reuse_anon_folio(struct folio *folio,
4163 				    struct vm_area_struct *vma)
4164 {
4165 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio))
4166 		return __wp_can_reuse_large_anon_folio(folio, vma);
4167 
4168 	/*
4169 	 * We have to verify under folio lock: these early checks are
4170 	 * just an optimization to avoid locking the folio and freeing
4171 	 * the swapcache if there is little hope that we can reuse.
4172 	 *
4173 	 * KSM doesn't necessarily raise the folio refcount.
4174 	 */
4175 	if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
4176 		return false;
4177 	if (!folio_test_lru(folio))
4178 		/*
4179 		 * We cannot easily detect+handle references from
4180 		 * remote LRU caches or references to LRU folios.
4181 		 */
4182 		lru_add_drain();
4183 	if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
4184 		return false;
4185 	if (!folio_trylock(folio))
4186 		return false;
4187 	if (folio_test_swapcache(folio))
4188 		folio_free_swap(folio);
4189 	if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
4190 		folio_unlock(folio);
4191 		return false;
4192 	}
4193 	/*
4194 	 * Ok, we've got the only folio reference from our mapping
4195 	 * and the folio is locked, it's dark out, and we're wearing
4196 	 * sunglasses. Hit it.
4197 	 */
4198 	folio_move_anon_rmap(folio, vma);
4199 	folio_unlock(folio);
4200 	return true;
4201 }
4202 
4203 /*
4204  * This routine handles present pages, when
4205  * * users try to write to a shared page (FAULT_FLAG_WRITE)
4206  * * GUP wants to take a R/O pin on a possibly shared anonymous page
4207  *   (FAULT_FLAG_UNSHARE)
4208  *
4209  * It is done by copying the page to a new address and decrementing the
4210  * shared-page counter for the old page.
4211  *
4212  * Note that this routine assumes that the protection checks have been
4213  * done by the caller (the low-level page fault routine in most cases).
4214  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
4215  * done any necessary COW.
4216  *
4217  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
4218  * though the page will change only once the write actually happens. This
4219  * avoids a few races, and potentially makes it more efficient.
4220  *
4221  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4222  * but allow concurrent faults), with pte both mapped and locked.
4223  * We return with mmap_lock still held, but pte unmapped and unlocked.
4224  */
4225 static vm_fault_t do_wp_page(struct vm_fault *vmf)
4226 	__releases(vmf->ptl)
4227 {
4228 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
4229 	struct vm_area_struct *vma = vmf->vma;
4230 	struct folio *folio = NULL;
4231 	pte_t pte;
4232 
4233 	if (likely(!unshare)) {
4234 		if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
4235 			if (!userfaultfd_wp_async(vma)) {
4236 				pte_unmap_unlock(vmf->pte, vmf->ptl);
4237 				return handle_userfault(vmf, VM_UFFD_WP);
4238 			}
4239 
4240 			/*
4241 			 * Nothing needed (cache flush, TLB invalidations,
4242 			 * etc.) because we're only removing the uffd-wp bit,
4243 			 * which is completely invisible to the user.
4244 			 */
4245 			pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
4246 
4247 			set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4248 			/*
4249 			 * Update orig_pte so that we are prepared for the
4250 			 * follow-up CoW handling below.
4251 			 */
4252 			vmf->orig_pte = pte;
4253 		}
4254 
4255 		/*
4256 		 * Userfaultfd write-protect can defer flushes. Ensure the TLB
4257 		 * is flushed in this case before copying.
4258 		 */
4259 		if (unlikely(userfaultfd_wp(vmf->vma) &&
4260 			     mm_tlb_flush_pending(vmf->vma->vm_mm)))
4261 			flush_tlb_page(vmf->vma, vmf->address);
4262 	}
4263 
4264 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
4265 
4266 	if (vmf->page)
4267 		folio = page_folio(vmf->page);
4268 
4269 	/*
4270 	 * Shared mapping: we are guaranteed to have VM_WRITE and
4271 	 * FAULT_FLAG_WRITE set at this point.
4272 	 */
4273 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4274 		/*
4275 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
4276 		 * VM_PFNMAP VMA. FS DAX also wants ops->pfn_mkwrite called.
4277 		 *
4278 		 * We should not cow pages in a shared writeable mapping.
4279 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
4280 		 */
4281 		if (!vmf->page || is_fsdax_page(vmf->page)) {
4282 			vmf->page = NULL;
4283 			return wp_pfn_shared(vmf);
4284 		}
4285 		return wp_page_shared(vmf, folio);
4286 	}
4287 
4288 	/*
4289 	 * Private mapping: create an exclusive anonymous page copy if reuse
4290 	 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
4291 	 *
4292 	 * If we encounter a page that is marked exclusive, we must reuse
4293 	 * the page without further checks.
4294 	 */
4295 	if (folio && folio_test_anon(folio) &&
4296 	    (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
4297 		if (!PageAnonExclusive(vmf->page))
4298 			SetPageAnonExclusive(vmf->page);
4299 		if (unlikely(unshare)) {
4300 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4301 			return 0;
4302 		}
4303 		wp_page_reuse(vmf, folio);
4304 		return 0;
4305 	}
4306 	/*
4307 	 * Ok, we need to copy. Oh, well..
4308 	 */
4309 	if (folio)
4310 		folio_get(folio);
4311 
4312 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4313 #ifdef CONFIG_KSM
4314 	if (folio && folio_test_ksm(folio))
4315 		count_vm_event(COW_KSM);
4316 #endif
4317 	return wp_page_copy(vmf);
4318 }
4319 
4320 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
4321 					    pgoff_t first_index,
4322 					    pgoff_t last_index,
4323 					    struct zap_details *details)
4324 {
4325 	struct vm_area_struct *vma;
4326 	unsigned long start, size;
4327 	struct mmu_gather tlb;
4328 
4329 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
4330 		const pgoff_t start_idx = max(first_index, vma->vm_pgoff);
4331 		const pgoff_t end_idx = min(last_index, vma_last_pgoff(vma)) + 1;
4332 
4333 		start = vma->vm_start + ((start_idx - vma->vm_pgoff) << PAGE_SHIFT);
4334 		size = (end_idx - start_idx) << PAGE_SHIFT;
4335 
4336 		tlb_gather_mmu(&tlb, vma->vm_mm);
4337 		zap_vma_range_batched(&tlb, vma, start, size, details);
4338 		tlb_finish_mmu(&tlb);
4339 	}
4340 }
4341 
4342 /**
4343  * unmap_mapping_folio() - Unmap single folio from processes.
4344  * @folio: The locked folio to be unmapped.
4345  *
4346  * Unmap this folio from any userspace process which still has it mmaped.
4347  * Typically, for efficiency, the range of nearby pages has already been
4348  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
4349  * truncation or invalidation holds the lock on a folio, it may find that
4350  * the page has been remapped again: and then uses unmap_mapping_folio()
4351  * to unmap it finally.
4352  */
4353 void unmap_mapping_folio(struct folio *folio)
4354 {
4355 	struct address_space *mapping = folio->mapping;
4356 	struct zap_details details = { };
4357 	pgoff_t	first_index;
4358 	pgoff_t	last_index;
4359 
4360 	VM_BUG_ON(!folio_test_locked(folio));
4361 
4362 	first_index = folio->index;
4363 	last_index = folio_next_index(folio) - 1;
4364 
4365 	details.skip_cows = true;
4366 	details.single_folio = folio;
4367 	details.zap_flags = ZAP_FLAG_DROP_MARKER;
4368 
4369 	i_mmap_lock_read(mapping);
4370 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
4371 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
4372 					 last_index, &details);
4373 	i_mmap_unlock_read(mapping);
4374 }
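
/*
 * Example (illustrative): truncation uses unmap_mapping_folio() as a last
 * resort once it holds the folio lock, along the lines of:
 *
 *	if (folio_mapped(folio))
 *		unmap_mapping_folio(folio);
 */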
4375 
4376 /**
4377  * unmap_mapping_pages() - Unmap pages from processes.
4378  * @mapping: The address space containing pages to be unmapped.
4379  * @start: Index of first page to be unmapped.
4380  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
4381  * @even_cows: Whether to unmap even private COWed pages.
4382  *
4383  * Unmap the pages in this address space from any userspace process which
4384  * has them mmaped.  Generally, you want to remove COWed pages as well when
4385  * a file is being truncated, but not when invalidating pages from the page
4386  * cache.
4387  */
4388 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
4389 		pgoff_t nr, bool even_cows)
4390 {
4391 	struct zap_details details = { };
4392 	pgoff_t	first_index = start;
4393 	pgoff_t	last_index = start + nr - 1;
4394 
4395 	details.skip_cows = !even_cows;
4396 	if (last_index < first_index)
4397 		last_index = ULONG_MAX;
4398 
4399 	i_mmap_lock_read(mapping);
4400 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
4401 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
4402 					 last_index, &details);
4403 	i_mmap_unlock_read(mapping);
4404 }
4405 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
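
/*
 * Example (illustrative): a filesystem invalidating a single pagecache page
 * would unmap it from every process while keeping private COWed copies
 * intact:
 *
 *	unmap_mapping_pages(inode->i_mapping, index, 1, false);
 */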
4406 
4407 /**
4408  * unmap_mapping_range - unmap the portion of all mmaps in the specified
4409  * address_space corresponding to the specified byte range in the underlying
4410  * file.
4411  *
4412  * @mapping: the address space containing mmaps to be unmapped.
4413  * @holebegin: byte in first page to unmap, relative to the start of
4414  * the underlying file.  This will be rounded down to a PAGE_SIZE
4415  * boundary.  Note that this is different from truncate_pagecache(), which
4416  * must keep the partial page.  In contrast, we must get rid of
4417  * partial pages.
4418  * @holelen: size of prospective hole in bytes.  This will be rounded
4419  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
4420  * end of the file.
4421  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
4422  * but 0 when invalidating pagecache, don't throw away private data.
4423  */
4424 void unmap_mapping_range(struct address_space *mapping,
4425 		loff_t const holebegin, loff_t const holelen, int even_cows)
4426 {
4427 	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
4428 	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
4429 
4430 	/* Check for overflow. */
4431 	if (sizeof(holelen) > sizeof(hlen)) {
4432 		long long holeend =
4433 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
4434 		if (holeend & ~(long long)ULONG_MAX)
4435 			hlen = ULONG_MAX - hba + 1;
4436 	}
4437 
4438 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
4439 }
4440 EXPORT_SYMBOL(unmap_mapping_range);
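
/*
 * Example (illustrative): truncating a file to newsize unmaps everything
 * from the first whole page past newsize to EOF, private COWed copies
 * included:
 *
 *	unmap_mapping_range(mapping, round_up(newsize, PAGE_SIZE), 0, 1);
 */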
4441 
4442 /*
4443  * Restore a potential device exclusive pte to a working pte entry
4444  */
4445 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
4446 {
4447 	struct folio *folio = page_folio(vmf->page);
4448 	struct vm_area_struct *vma = vmf->vma;
4449 	struct mmu_notifier_range range;
4450 	vm_fault_t ret;
4451 
4452 	/*
4453 	 * We need a reference to lock the folio because we don't hold
4454 	 * the PTL so a racing thread can remove the device-exclusive
4455 	 * entry and unmap it. If the folio is free the entry must
4456 	 * have been removed already. If it happens to have already
4457 	 * been re-allocated after being freed all we do is lock and
4458 	 * unlock it.
4459 	 */
4460 	if (!folio_try_get(folio))
4461 		return 0;
4462 
4463 	ret = folio_lock_or_retry(folio, vmf);
4464 	if (ret) {
4465 		folio_put(folio);
4466 		return ret;
4467 	}
4468 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_CLEAR, 0,
4469 				vma->vm_mm, vmf->address & PAGE_MASK,
4470 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
4471 	mmu_notifier_invalidate_range_start(&range);
4472 
4473 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4474 				&vmf->ptl);
4475 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4476 		restore_exclusive_pte(vma, folio, vmf->page, vmf->address,
4477 				      vmf->pte, vmf->orig_pte);
4478 
4479 	if (vmf->pte)
4480 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4481 	folio_unlock(folio);
4482 	folio_put(folio);
4483 
4484 	mmu_notifier_invalidate_range_end(&range);
4485 	return 0;
4486 }
4487 
4488 /*
4489  * Check if we should call folio_free_swap to free the swap cache.
4490  * folio_free_swap only frees the swap cache to release the slot if swap
4491  * count is zero, so we don't need to check the swap count here.
4492  */
4493 static inline bool should_try_to_free_swap(struct swap_info_struct *si,
4494 					   struct folio *folio,
4495 					   struct vm_area_struct *vma,
4496 					   unsigned int extra_refs,
4497 					   unsigned int fault_flags)
4498 {
4499 	if (!folio_test_swapcache(folio))
4500 		return false;
4501 	/*
4502 	 * Always try to free swap cache for SWP_SYNCHRONOUS_IO devices. Swap
4503 	 * cache can help save some IO or memory overhead, but these devices
4504 	 * are fast, while the swap cache pinning the slot, which defers the
4505 	 * release of metadata and causes fragmentation, is a more critical issue.
4506 	 */
4507 	if (data_race(si->flags & SWP_SYNCHRONOUS_IO))
4508 		return true;
4509 	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
4510 	    folio_test_mlocked(folio))
4511 		return true;
4512 	/*
4513 	 * If we want to map a page that's in the swapcache writable, we
4514 	 * have to detect via the refcount if we're really the exclusive
4515 	 * user. Try freeing the swapcache to get rid of the swapcache
4516 	 * reference only in case it's likely that we'll be the exclusive user.
4517 	 */
4518 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
4519 		folio_ref_count(folio) == (extra_refs + folio_nr_pages(folio));
4520 }
4521 
4522 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
4523 {
4524 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4525 				       vmf->address, &vmf->ptl);
4526 	if (!vmf->pte)
4527 		return 0;
4528 	/*
4529 	 * Be careful to only recover a special uffd-wp pte into a none pte.
4530 	 * Otherwise it means the pte could have changed, so retry.
4531 	 *
4532 	 * This should also cover the case where e.g. the pte changed
4533 	 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
4534 	 * So the pte_is_marker() check is not enough to safely drop the pte.
4535 	 */
4536 	if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
4537 		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
4538 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4539 	return 0;
4540 }
4541 
4542 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
4543 {
4544 	if (vma_is_anonymous(vmf->vma))
4545 		return do_anonymous_page(vmf);
4546 	else
4547 		return do_fault(vmf);
4548 }
4549 
4550 /*
4551  * This is actually a page-missing access, but with uffd-wp special pte
4552  * installed.  It means this pte was wr-protected before being unmapped.
4553  */
4554 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
4555 {
4556 	/*
4557 	 * Just in case there are leftover special ptes even after the region
4558 	 * got unregistered - we can simply clear them.
4559 	 */
4560 	if (unlikely(!userfaultfd_wp(vmf->vma)))
4561 		return pte_marker_clear(vmf);
4562 
4563 	return do_pte_missing(vmf);
4564 }
4565 
4566 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
4567 {
4568 	const softleaf_t entry = softleaf_from_pte(vmf->orig_pte);
4569 	const pte_marker marker = softleaf_to_marker(entry);
4570 
4571 	/*
4572 	 * PTE markers should never be empty.  If anything weird happened,
4573 	 * the best thing to do is to kill the process along with its mm.
4574 	 */
4575 	if (WARN_ON_ONCE(!marker))
4576 		return VM_FAULT_SIGBUS;
4577 
4578 	/* Higher priority than uffd-wp when data corrupted */
4579 	if (marker & PTE_MARKER_POISONED)
4580 		return VM_FAULT_HWPOISON;
4581 
4582 	/* Hitting a guard page is always a fatal condition. */
4583 	if (marker & PTE_MARKER_GUARD)
4584 		return VM_FAULT_SIGSEGV;
4585 
4586 	if (softleaf_is_uffd_wp_marker(entry))
4587 		return pte_marker_handle_uffd_wp(vmf);
4588 
4589 	/* This is an unknown pte marker */
4590 	return VM_FAULT_SIGBUS;
4591 }
4592 
4593 static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
4594 {
4595 	struct vm_area_struct *vma = vmf->vma;
4596 	struct folio *folio;
4597 	softleaf_t entry;
4598 
4599 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
4600 	if (!folio)
4601 		return NULL;
4602 
4603 	entry = softleaf_from_pte(vmf->orig_pte);
4604 	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4605 					   GFP_KERNEL, entry)) {
4606 		folio_put(folio);
4607 		return NULL;
4608 	}
4609 
4610 	return folio;
4611 }
4612 
4613 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4614 /*
4615  * Check if the PTEs within a range are contiguous swap entries
4616  * and have consistent swapcache, zeromap.
4617  */
4618 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
4619 {
4620 	unsigned long addr;
4621 	softleaf_t entry;
4622 	int idx;
4623 	pte_t pte;
4624 
4625 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4626 	idx = (vmf->address - addr) / PAGE_SIZE;
4627 	pte = ptep_get(ptep);
4628 
4629 	if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
4630 		return false;
4631 	entry = softleaf_from_pte(pte);
4632 	if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
4633 		return false;
4634 
4635 	/*
4636 	 * swap_read_folio() can't handle the case where a large folio comes
4637 	 * partially from different backends, and these are likely corner cases.
4638 	 * Such handling might be added once zswap supports large folios.
4639 	 */
4640 	if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
4641 		return false;
4642 	if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
4643 		return false;
4644 
4645 	return true;
4646 }
4647 
4648 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
4649 						     unsigned long addr,
4650 						     unsigned long orders)
4651 {
4652 	int order, nr;
4653 
4654 	order = highest_order(orders);
4655 
4656 	/*
4657 	 * To swap in a THP with nr pages, we require that its first swap_offset
4658 	 * is aligned with that number, as it was when the THP was swapped out.
4659 	 * This helps filter out most invalid entries.
4660 	 */
4661 	while (orders) {
4662 		nr = 1 << order;
4663 		if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
4664 			break;
4665 		order = next_order(&orders, order);
4666 	}
4667 
4668 	return orders;
4669 }
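
/*
 * Worked example (illustrative): for order 2 (nr = 4) the check above
 * requires (addr >> PAGE_SHIFT) % 4 == swp_offset % 4. A swap entry at
 * offset 6 (6 % 4 == 2) therefore only qualifies at virtual addresses
 * whose page index is congruent to 2 modulo 4; otherwise order 2 is
 * dropped and the next lower enabled order is tried.
 */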
4670 
4671 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4672 {
4673 	struct vm_area_struct *vma = vmf->vma;
4674 	unsigned long orders;
4675 	struct folio *folio;
4676 	unsigned long addr;
4677 	softleaf_t entry;
4678 	spinlock_t *ptl;
4679 	pte_t *pte;
4680 	gfp_t gfp;
4681 	int order;
4682 
4683 	/*
4684 	 * If uffd is active for the vma we need per-page fault fidelity to
4685 	 * maintain the uffd semantics.
4686 	 */
4687 	if (unlikely(userfaultfd_armed(vma)))
4688 		goto fallback;
4689 
4690 	/*
4691 	 * A large swapped out folio could be partially or fully in zswap. We
4692 	 * lack handling for such cases, so fall back to swapping in an
4693 	 * order-0 folio.
4694 	 */
4695 	if (!zswap_never_enabled())
4696 		goto fallback;
4697 
4698 	entry = softleaf_from_pte(vmf->orig_pte);
4699 	/*
4700 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4701 	 * and suitable for swapping THP.
4702 	 */
4703 	orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT,
4704 					  BIT(PMD_ORDER) - 1);
4705 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4706 	orders = thp_swap_suitable_orders(swp_offset(entry),
4707 					  vmf->address, orders);
4708 
4709 	if (!orders)
4710 		goto fallback;
4711 
4712 	pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4713 				  vmf->address & PMD_MASK, &ptl);
4714 	if (unlikely(!pte))
4715 		goto fallback;
4716 
4717 	/*
4718 	 * For do_swap_page, find the highest order where the aligned range
4719 	 * consists entirely of swap entries with contiguous swap offsets.
4720 	 */
4721 	order = highest_order(orders);
4722 	while (orders) {
4723 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4724 		if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
4725 			break;
4726 		order = next_order(&orders, order);
4727 	}
4728 
4729 	pte_unmap_unlock(pte, ptl);
4730 
4731 	/* Try allocating the highest of the remaining orders. */
4732 	gfp = vma_thp_gfp_mask(vma);
4733 	while (orders) {
4734 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4735 		folio = vma_alloc_folio(gfp, order, vma, addr);
4736 		if (folio) {
4737 			if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4738 							    gfp, entry))
4739 				return folio;
4740 			count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
4741 			folio_put(folio);
4742 		}
4743 		count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
4744 		order = next_order(&orders, order);
4745 	}
4746 
4747 fallback:
4748 	return __alloc_swap_folio(vmf);
4749 }
4750 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4751 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4752 {
4753 	return __alloc_swap_folio(vmf);
4754 }
4755 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4756 
4757 /* Sanity check that a folio is fully exclusive */
4758 static void check_swap_exclusive(struct folio *folio, swp_entry_t entry,
4759 				 unsigned int nr_pages)
4760 {
4761 	/* Called with PT lock and folio lock held; the swap count is stable */
4762 	do {
4763 		VM_WARN_ON_ONCE_FOLIO(__swap_count(entry) != 1, folio);
4764 		entry.val++;
4765 	} while (--nr_pages);
4766 }
4767 
4768 /*
4769  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4770  * but allow concurrent faults), and pte mapped but not yet locked.
4771  * We return with pte unmapped and unlocked.
4772  *
4773  * We return with the mmap_lock locked or unlocked in the same cases
4774  * as does filemap_fault().
4775  */
4776 vm_fault_t do_swap_page(struct vm_fault *vmf)
4777 {
4778 	struct vm_area_struct *vma = vmf->vma;
4779 	struct folio *swapcache = NULL, *folio;
4780 	struct page *page;
4781 	struct swap_info_struct *si = NULL;
4782 	rmap_t rmap_flags = RMAP_NONE;
4783 	bool exclusive = false;
4784 	softleaf_t entry;
4785 	pte_t pte;
4786 	vm_fault_t ret = 0;
4787 	int nr_pages;
4788 	unsigned long page_idx;
4789 	unsigned long address;
4790 	pte_t *ptep;
4791 
4792 	if (!pte_unmap_same(vmf))
4793 		goto out;
4794 
4795 	entry = softleaf_from_pte(vmf->orig_pte);
4796 	if (unlikely(!softleaf_is_swap(entry))) {
4797 		if (softleaf_is_migration(entry)) {
4798 			migration_entry_wait(vma->vm_mm, vmf->pmd,
4799 					     vmf->address);
4800 		} else if (softleaf_is_device_exclusive(entry)) {
4801 			vmf->page = softleaf_to_page(entry);
4802 			ret = remove_device_exclusive_entry(vmf);
4803 		} else if (softleaf_is_device_private(entry)) {
4804 			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4805 				/*
4806 				 * migrate_to_ram is not yet ready to operate
4807 				 * under VMA lock.
4808 				 */
4809 				vma_end_read(vma);
4810 				ret = VM_FAULT_RETRY;
4811 				goto out;
4812 			}
4813 
4814 			vmf->page = softleaf_to_page(entry);
4815 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4816 					vmf->address, &vmf->ptl);
4817 			if (unlikely(!vmf->pte ||
4818 				     !pte_same(ptep_get(vmf->pte),
4819 							vmf->orig_pte)))
4820 				goto unlock;
4821 
4822 			/*
4823 			 * Get a page reference while we know the page can't be
4824 			 * freed.
4825 			 */
4826 			if (trylock_page(vmf->page)) {
4827 				struct dev_pagemap *pgmap;
4828 
4829 				get_page(vmf->page);
4830 				pte_unmap_unlock(vmf->pte, vmf->ptl);
4831 				pgmap = page_pgmap(vmf->page);
4832 				ret = pgmap->ops->migrate_to_ram(vmf);
4833 				unlock_page(vmf->page);
4834 				put_page(vmf->page);
4835 			} else {
4836 				pte_unmap(vmf->pte);
4837 				softleaf_entry_wait_on_locked(entry, vmf->ptl);
4838 			}
4839 		} else if (softleaf_is_hwpoison(entry)) {
4840 			ret = VM_FAULT_HWPOISON;
4841 		} else if (softleaf_is_marker(entry)) {
4842 			ret = handle_pte_marker(vmf);
4843 		} else {
4844 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4845 			ret = VM_FAULT_SIGBUS;
4846 		}
4847 		goto out;
4848 	}
4849 
4850 	/* Prevent swapoff from happening to us. */
4851 	si = get_swap_device(entry);
4852 	if (unlikely(!si))
4853 		goto out;
4854 
4855 	folio = swap_cache_get_folio(entry);
4856 	if (folio)
4857 		swap_update_readahead(folio, vma, vmf->address);
4858 	if (!folio) {
4859 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
4860 			folio = alloc_swap_folio(vmf);
4861 			if (folio) {
4862 				/*
4863 				 * The folio is charged, so swapin can only fail
4864 				 * due to a raced swapin, returning NULL.
4865 				 */
4866 				swapcache = swapin_folio(entry, folio);
4867 				if (swapcache != folio)
4868 					folio_put(folio);
4869 				folio = swapcache;
4870 			}
4871 		} else {
4872 			folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
4873 		}
4874 
4875 		if (!folio) {
4876 			/*
4877 			 * Back out if somebody else faulted in this pte
4878 			 * while we released the pte lock.
4879 			 */
4880 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4881 					vmf->address, &vmf->ptl);
4882 			if (likely(vmf->pte &&
4883 				   pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4884 				ret = VM_FAULT_OOM;
4885 			goto unlock;
4886 		}
4887 
4888 		/* Had to read the page from swap area: Major fault */
4889 		ret = VM_FAULT_MAJOR;
4890 		count_vm_event(PGMAJFAULT);
4891 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4892 	}
4893 
4894 	swapcache = folio;
4895 	ret |= folio_lock_or_retry(folio, vmf);
4896 	if (ret & VM_FAULT_RETRY)
4897 		goto out_release;
4898 
4899 	page = folio_file_page(folio, swp_offset(entry));
4900 	/*
4901 	 * Make sure folio_free_swap() or swapoff did not release the
4902 	 * swapcache from under us.  The page pin, and pte_same test
4903 	 * below, are not enough to exclude that.  Even if it is still
4904 	 * swapcache, we need to check that the page's swap has not
4905 	 * changed.
4906 	 */
4907 	if (unlikely(!folio_matches_swap_entry(folio, entry)))
4908 		goto out_page;
4909 
4910 	if (unlikely(PageHWPoison(page))) {
4911 		/*
4912 		 * hwpoisoned dirty swapcache pages are kept for killing
4913 		 * owner processes (which may be unknown at hwpoison time)
4914 		 */
4915 		ret = VM_FAULT_HWPOISON;
4916 		goto out_page;
4917 	}
4918 
4919 	/*
4920 	 * KSM sometimes has to copy on read faults, for example, if
4921 	 * folio->index of non-ksm folios would be nonlinear inside the
4922 	 * anon VMA -- the ksm flag is lost on actual swapout.
4923 	 */
4924 	folio = ksm_might_need_to_copy(folio, vma, vmf->address);
4925 	if (unlikely(!folio)) {
4926 		ret = VM_FAULT_OOM;
4927 		folio = swapcache;
4928 		goto out_page;
4929 	} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
4930 		ret = VM_FAULT_HWPOISON;
4931 		folio = swapcache;
4932 		goto out_page;
4933 	} else if (folio != swapcache)
4934 		page = folio_page(folio, 0);
4935 
4936 	/*
4937 	 * If we want to map a page that's in the swapcache writable, we
4938 	 * have to detect via the refcount if we're really the exclusive
4939 	 * owner. Try removing the extra reference from the local LRU
4940 	 * caches if required.
4941 	 */
4942 	if ((vmf->flags & FAULT_FLAG_WRITE) &&
4943 	    !folio_test_ksm(folio) && !folio_test_lru(folio))
4944 		lru_add_drain();
4945 
4946 	folio_throttle_swaprate(folio, GFP_KERNEL);
4947 
4948 	/*
4949 	 * Back out if somebody else already faulted in this pte.
4950 	 */
4951 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4952 			&vmf->ptl);
4953 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4954 		goto out_nomap;
4955 
4956 	if (unlikely(!folio_test_uptodate(folio))) {
4957 		ret = VM_FAULT_SIGBUS;
4958 		goto out_nomap;
4959 	}
4960 
4961 	nr_pages = 1;
4962 	page_idx = 0;
4963 	address = vmf->address;
4964 	ptep = vmf->pte;
4965 	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
4966 		int nr = folio_nr_pages(folio);
4967 		unsigned long idx = folio_page_idx(folio, page);
4968 		unsigned long folio_start = address - idx * PAGE_SIZE;
4969 		unsigned long folio_end = folio_start + nr * PAGE_SIZE;
4970 		pte_t *folio_ptep;
4971 		pte_t folio_pte;
4972 
4973 		if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
4974 			goto check_folio;
4975 		if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
4976 			goto check_folio;
4977 
4978 		folio_ptep = vmf->pte - idx;
4979 		folio_pte = ptep_get(folio_ptep);
4980 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4981 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4982 			goto check_folio;
4983 
4984 		page_idx = idx;
4985 		address = folio_start;
4986 		ptep = folio_ptep;
4987 		nr_pages = nr;
4988 		entry = folio->swap;
4989 		page = &folio->page;
4990 	}
4991 
4992 check_folio:
4993 	/*
4994 	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4995 	 * must never point at an anonymous page in the swapcache that is
4996 	 * PG_anon_exclusive. Sanity check that this holds and especially, that
4997 	 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4998 	 * check after taking the PT lock and making sure that nobody
4999 	 * concurrently faulted in this page and set PG_anon_exclusive.
5000 	 */
5001 	BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
5002 	BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
5003 
5004 	/*
5005 	 * If a large folio already belongs to an anon mapping, then we
5006 	 * can just go on and map it partially.
5007 	 * If not, with the large swapin check above failing, the page table
5008 	 * must have changed, so subpages might have been charged to the wrong
5009 	 * cgroup, or the folio might even belong to shmem. So we have to free
5010 	 * it and fall back. Nothing should have touched it; both anon and
5011 	 * shmem check that a large folio is fully mappable before use.
5012 	 *
5013 	 * This will be removed once we unify folio allocation in the swap cache
5014 	 * layer, where allocation of a folio stabilizes the swap entries.
5015 	 */
5016 	if (!folio_test_anon(folio) && folio_test_large(folio) &&
5017 	    nr_pages != folio_nr_pages(folio)) {
5018 		if (!WARN_ON_ONCE(folio_test_dirty(folio)))
5019 			swap_cache_del_folio(folio);
5020 		goto out_nomap;
5021 	}
5022 
5023 	/*
5024 	 * Check under PT lock (to protect against concurrent fork() sharing
5025 	 * the swap entry concurrently) for certainly exclusive pages.
5026 	 */
5027 	if (!folio_test_ksm(folio)) {
5028 		/*
5029 		 * The can_swapin_thp check above ensures all PTEs have the
5030 		 * same exclusiveness. Checking just one PTE is fine.
5031 		 */
5032 		exclusive = pte_swp_exclusive(vmf->orig_pte);
5033 		if (exclusive)
5034 			check_swap_exclusive(folio, entry, nr_pages);
5035 		if (folio != swapcache) {
5036 			/*
5037 			 * We have a fresh page that is not exposed to the
5038 			 * swapcache -> certainly exclusive.
5039 			 */
5040 			exclusive = true;
5041 		} else if (exclusive && folio_test_writeback(folio) &&
5042 			  data_race(si->flags & SWP_STABLE_WRITES)) {
5043 			/*
5044 			 * This is tricky: not all swap backends support
5045 			 * concurrent page modifications while under writeback.
5046 			 *
5047 			 * So if we stumble over such a page in the swapcache
5048 			 * we must not set the page exclusive, otherwise we can
5049 			 * map it writable without further checks and modify it
5050 			 * while still under writeback.
5051 			 *
5052 			 * For these problematic swap backends, simply drop the
5053 			 * exclusive marker: this is perfectly fine as we start
5054 			 * writeback only if we fully unmapped the page and
5055 			 * there are no unexpected references on the page after
5056 			 * unmapping succeeded. After fully unmapped, no
5057 			 * further GUP references (FOLL_GET and FOLL_PIN) can
5058 			 * appear, so dropping the exclusive marker and mapping
5059 			 * it only R/O is fine.
5060 			 */
5061 			exclusive = false;
5062 		}
5063 	}
5064 
5065 	/*
5066 	 * Some architectures may have to restore extra metadata to the page
5067 	 * when reading from swap. This metadata may be indexed by swap entry
5068 	 * so this must be called before folio_put_swap().
5069 	 */
5070 	arch_swap_restore(folio_swap(entry, folio), folio);
5071 
5072 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
5073 	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
5074 	pte = mk_pte(page, vma->vm_page_prot);
5075 	if (pte_swp_soft_dirty(vmf->orig_pte))
5076 		pte = pte_mksoft_dirty(pte);
5077 	if (pte_swp_uffd_wp(vmf->orig_pte))
5078 		pte = pte_mkuffd_wp(pte);
5079 
5080 	/*
5081 	 * Same logic as in do_wp_page(); however, optimize for pages that are
5082 	 * certainly not shared either because we just allocated them without
5083 	 * exposing them to the swapcache or because the swap entry indicates
5084 	 * exclusivity.
5085 	 */
5086 	if (!folio_test_ksm(folio) &&
5087 	    (exclusive || folio_ref_count(folio) == 1)) {
5088 		if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
5089 		    !pte_needs_soft_dirty_wp(vma, pte)) {
5090 			pte = pte_mkwrite(pte, vma);
5091 			if (vmf->flags & FAULT_FLAG_WRITE) {
5092 				pte = pte_mkdirty(pte);
5093 				vmf->flags &= ~FAULT_FLAG_WRITE;
5094 			}
5095 		}
5096 		rmap_flags |= RMAP_EXCLUSIVE;
5097 	}
5098 	folio_ref_add(folio, nr_pages - 1);
5099 	flush_icache_pages(vma, page, nr_pages);
5100 	vmf->orig_pte = pte_advance_pfn(pte, page_idx);
5101 
5102 	/* ksm created a completely new copy */
5103 	if (unlikely(folio != swapcache)) {
5104 		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
5105 		folio_add_lru_vma(folio, vma);
5106 		folio_put_swap(swapcache, NULL);
5107 	} else if (!folio_test_anon(folio)) {
5108 		/*
5109 		 * We currently only expect !anon folios that are fully
5110 		 * mappable. See the comment after can_swapin_thp above.
5111 		 */
5112 		VM_WARN_ON_ONCE_FOLIO(folio_nr_pages(folio) != nr_pages, folio);
5113 		VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
5114 		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
5115 		folio_put_swap(folio, NULL);
5116 	} else {
5117 		VM_WARN_ON_ONCE(nr_pages != 1 && nr_pages != folio_nr_pages(folio));
5118 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
5119 					 rmap_flags);
5120 		folio_put_swap(folio, nr_pages == 1 ? page : NULL);
5121 	}
5122 
5123 	VM_BUG_ON(!folio_test_anon(folio) ||
5124 			(pte_write(pte) && !PageAnonExclusive(page)));
5125 	set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
5126 	arch_do_swap_page_nr(vma->vm_mm, vma, address,
5127 			pte, pte, nr_pages);
5128 
5129 	/*
5130 	 * Remove the swap entry and conditionally try to free up the swapcache.
5131 	 * Do it after mapping, so that racing page faults will likely see the
5132 	 * folio in swap cache and wait on the folio lock.
5133 	 */
5134 	if (should_try_to_free_swap(si, folio, vma, nr_pages, vmf->flags))
5135 		folio_free_swap(folio);
5136 
5137 	folio_unlock(folio);
5138 	if (unlikely(folio != swapcache)) {
5139 		/*
5140 		 * Hold the lock to prevent the swap entry from being reused
5141 		 * until we take the PT lock for the pte_same() check
5142 		 * (to avoid false positives from pte_same). For
5143 		 * further safety, release the lock only after folio_put_swap
5144 		 * so that the swap count won't change under a
5145 		 * parallel locked swapcache.
5146 		 */
5147 		folio_unlock(swapcache);
5148 		folio_put(swapcache);
5149 	}
5150 
5151 	if (vmf->flags & FAULT_FLAG_WRITE) {
5152 		ret |= do_wp_page(vmf);
5153 		if (ret & VM_FAULT_ERROR)
5154 			ret &= VM_FAULT_ERROR;
5155 		goto out;
5156 	}
5157 
5158 	/* No need to invalidate - it was non-present before */
5159 	update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
5160 unlock:
5161 	if (vmf->pte)
5162 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5163 out:
5164 	if (si)
5165 		put_swap_device(si);
5166 	return ret;
5167 out_nomap:
5168 	if (vmf->pte)
5169 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5170 out_page:
5171 	if (folio_test_swapcache(folio))
5172 		folio_free_swap(folio);
5173 	folio_unlock(folio);
5174 out_release:
5175 	folio_put(folio);
5176 	if (folio != swapcache) {
5177 		folio_unlock(swapcache);
5178 		folio_put(swapcache);
5179 	}
5180 	if (si)
5181 		put_swap_device(si);
5182 	return ret;
5183 }
5184 
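/*
 * Return true if all @nr_pages PTEs starting at @pte are pte_none(). The
 * PTEs are read locklessly, so unless the caller holds the PT lock the
 * result is only a hint that a concurrent fault may invalidate.
 */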
5185 static bool pte_range_none(pte_t *pte, int nr_pages)
5186 {
5187 	int i;
5188 
5189 	for (i = 0; i < nr_pages; i++) {
5190 		if (!pte_none(ptep_get_lockless(pte + i)))
5191 			return false;
5192 	}
5193 
5194 	return true;
5195 }
5196 
5197 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
5198 {
5199 	struct vm_area_struct *vma = vmf->vma;
5200 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5201 	unsigned long orders;
5202 	struct folio *folio;
5203 	unsigned long addr;
5204 	pte_t *pte;
5205 	gfp_t gfp;
5206 	int order;
5207 
5208 	/*
5209 	 * If uffd is active for the vma we need per-page fault fidelity to
5210 	 * maintain the uffd semantics.
5211 	 */
5212 	if (unlikely(userfaultfd_armed(vma)))
5213 		goto fallback;
5214 
5215 	/*
5216 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
5217 	 * for this vma. Then filter out the orders that can't be allocated over
5218 	 * the faulting address and still be fully contained in the vma.
5219 	 */
5220 	orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT,
5221 					  BIT(PMD_ORDER) - 1);
5222 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
5223 
5224 	if (!orders)
5225 		goto fallback;
5226 
5227 	pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
5228 	if (!pte)
5229 		return ERR_PTR(-EAGAIN);
5230 
5231 	/*
5232 	 * Find the highest order where the aligned range is completely
5233 	 * pte_none(). Note that all remaining orders will be completely
5234 	 * pte_none().
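	 *
	 * For example (illustrative values only): if orders has bits 4 and 2
	 * set, the 16-page naturally aligned block around the fault is tested
	 * first; if any of its PTEs is populated, next_order() clears bit 4
	 * and the 4-page block is tried instead.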
5235 	 */
5236 	order = highest_order(orders);
5237 	while (orders) {
5238 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
5239 		if (pte_range_none(pte + pte_index(addr), 1 << order))
5240 			break;
5241 		order = next_order(&orders, order);
5242 	}
5243 
5244 	pte_unmap(pte);
5245 
5246 	if (!orders)
5247 		goto fallback;
5248 
5249 	/* Try allocating the highest of the remaining orders. */
5250 	gfp = vma_thp_gfp_mask(vma);
5251 	while (orders) {
5252 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
5253 		folio = vma_alloc_folio(gfp, order, vma, addr);
5254 		if (folio) {
5255 			if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
5256 				count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
5257 				folio_put(folio);
5258 				goto next;
5259 			}
5260 			folio_throttle_swaprate(folio, gfp);
5261 			/*
5262 			 * When a folio is not zeroed during allocation
5263 			 * (__GFP_ZERO not used) or user folios require special
5264 			 * handling, folio_zero_user() is used to make sure
5265 			 * that the page corresponding to the faulting address
5266 			 * will be hot in the cache after zeroing.
5267 			 */
5268 			if (user_alloc_needs_zeroing())
5269 				folio_zero_user(folio, vmf->address);
5270 			return folio;
5271 		}
5272 next:
5273 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
5274 		order = next_order(&orders, order);
5275 	}
5276 
5277 fallback:
5278 #endif
5279 	return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
5280 }
5281 
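/*
 * Map a freshly allocated anonymous folio at @addr. Callers (such as the
 * anonymous fault path below) are expected to hold the PT lock and to have
 * verified that the PTEs covering the folio are still none.
 */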
5282 void map_anon_folio_pte_nopf(struct folio *folio, pte_t *pte,
5283 		struct vm_area_struct *vma, unsigned long addr,
5284 		bool uffd_wp)
5285 {
5286 	const unsigned int nr_pages = folio_nr_pages(folio);
5287 	pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
5288 
5289 	entry = pte_sw_mkyoung(entry);
5290 
5291 	if (vma->vm_flags & VM_WRITE)
5292 		entry = pte_mkwrite(pte_mkdirty(entry), vma);
5293 	if (uffd_wp)
5294 		entry = pte_mkuffd_wp(entry);
5295 
5296 	folio_ref_add(folio, nr_pages - 1);
5297 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5298 	folio_add_lru_vma(folio, vma);
5299 	set_ptes(vma->vm_mm, addr, pte, entry, nr_pages);
5300 	update_mmu_cache_range(NULL, vma, addr, pte, nr_pages);
5301 }
5302 
5303 static void map_anon_folio_pte_pf(struct folio *folio, pte_t *pte,
5304 		struct vm_area_struct *vma, unsigned long addr, bool uffd_wp)
5305 {
5306 	const unsigned int order = folio_order(folio);
5307 
5308 	map_anon_folio_pte_nopf(folio, pte, vma, addr, uffd_wp);
5309 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1L << order);
5310 	count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC);
5311 }
5312 
5313 /*
5314  * We enter with non-exclusive mmap_lock (to exclude vma changes,
5315  * but allow concurrent faults), and pte mapped but not yet locked.
5316  * We return with mmap_lock still held, but pte unmapped and unlocked.
5317  */
5318 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
5319 {
5320 	struct vm_area_struct *vma = vmf->vma;
5321 	unsigned long addr = vmf->address;
5322 	struct folio *folio;
5323 	vm_fault_t ret = 0;
5324 	int nr_pages;
5325 	pte_t entry;
5326 
5327 	/* File mapping without ->vm_ops ? */
5328 	if (vma->vm_flags & VM_SHARED)
5329 		return VM_FAULT_SIGBUS;
5330 
5331 	/*
5332 	 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
5333 	 * be distinguished from a transient failure of pte_offset_map().
5334 	 */
5335 	if (pte_alloc(vma->vm_mm, vmf->pmd))
5336 		return VM_FAULT_OOM;
5337 
5338 	/* Use the zero-page for reads */
5339 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
5340 			!mm_forbids_zeropage(vma->vm_mm)) {
5341 		entry = pte_mkspecial(pfn_pte(zero_pfn(vmf->address),
5342 						vma->vm_page_prot));
5343 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5344 				vmf->address, &vmf->ptl);
5345 		if (!vmf->pte)
5346 			goto unlock;
5347 		if (vmf_pte_changed(vmf)) {
5348 			update_mmu_tlb(vma, vmf->address, vmf->pte);
5349 			goto unlock;
5350 		}
5351 		ret = check_stable_address_space(vma->vm_mm);
5352 		if (ret)
5353 			goto unlock;
5354 		/* Deliver the page fault to userland, check inside PT lock */
5355 		if (userfaultfd_missing(vma)) {
5356 			pte_unmap_unlock(vmf->pte, vmf->ptl);
5357 			return handle_userfault(vmf, VM_UFFD_MISSING);
5358 		}
5359 		if (vmf_orig_pte_uffd_wp(vmf))
5360 			entry = pte_mkuffd_wp(entry);
5361 		set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
5362 
5363 		/* No need to invalidate - it was non-present before */
5364 		update_mmu_cache(vma, addr, vmf->pte);
5365 		goto unlock;
5366 	}
5367 
5368 	/* Allocate our own private page. */
5369 	ret = vmf_anon_prepare(vmf);
5370 	if (ret)
5371 		return ret;
5372 	/* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
5373 	folio = alloc_anon_folio(vmf);
5374 	if (IS_ERR(folio))
5375 		return 0;
5376 	if (!folio)
5377 		goto oom;
5378 
5379 	nr_pages = folio_nr_pages(folio);
5380 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
5381 
5382 	/*
5383 	 * The memory barrier inside __folio_mark_uptodate makes sure that
5384 	 * preceding stores to the page contents become visible before
5385 	 * the set_pte_at() write.
5386 	 */
5387 	__folio_mark_uptodate(folio);
5388 
5389 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
5390 	if (!vmf->pte)
5391 		goto release;
5392 	if (nr_pages == 1 && vmf_pte_changed(vmf)) {
5393 		update_mmu_tlb(vma, addr, vmf->pte);
5394 		goto release;
5395 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5396 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
5397 		goto release;
5398 	}
5399 
5400 	ret = check_stable_address_space(vma->vm_mm);
5401 	if (ret)
5402 		goto release;
5403 
5404 	/* Deliver the page fault to userland, check inside PT lock */
5405 	if (userfaultfd_missing(vma)) {
5406 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5407 		folio_put(folio);
5408 		return handle_userfault(vmf, VM_UFFD_MISSING);
5409 	}
5410 	map_anon_folio_pte_pf(folio, vmf->pte, vma, addr,
5411 			      vmf_orig_pte_uffd_wp(vmf));
5412 unlock:
5413 	if (vmf->pte)
5414 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5415 	return ret;
5416 release:
5417 	folio_put(folio);
5418 	goto unlock;
5419 oom:
5420 	return VM_FAULT_OOM;
5421 }
5422 
5423 /*
5424  * The mmap_lock must have been held on entry, and may have been
5425  * released depending on flags and vma->vm_ops->fault() return value.
5426  * See filemap_fault() and __lock_page_retry().
5427  */
5428 static vm_fault_t __do_fault(struct vm_fault *vmf)
5429 {
5430 	struct vm_area_struct *vma = vmf->vma;
5431 	struct folio *folio;
5432 	vm_fault_t ret;
5433 
5434 	/*
5435 	 * Preallocate pte before we take page_lock because this might lead to
5436 	 * deadlocks for memcg reclaim which waits for pages under writeback:
5437 	 *				lock_page(A)
5438 	 *				SetPageWriteback(A)
5439 	 *				unlock_page(A)
5440 	 * lock_page(B)
5441 	 *				lock_page(B)
5442 	 * pte_alloc_one
5443 	 *   shrink_folio_list
5444 	 *     wait_on_page_writeback(A)
5445 	 *				SetPageWriteback(B)
5446 	 *				unlock_page(B)
5447 	 *				# flush A, B to clear the writeback
5448 	 */
5449 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
5450 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5451 		if (!vmf->prealloc_pte)
5452 			return VM_FAULT_OOM;
5453 	}
5454 
5455 	ret = vma->vm_ops->fault(vmf);
5456 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
5457 			    VM_FAULT_DONE_COW)))
5458 		return ret;
5459 
5460 	folio = page_folio(vmf->page);
5461 	if (unlikely(PageHWPoison(vmf->page))) {
5462 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
5463 		if (ret & VM_FAULT_LOCKED) {
5464 			if (page_mapped(vmf->page))
5465 				unmap_mapping_folio(folio);
5466 			/* Retry if a clean folio was removed from the cache. */
5467 			if (mapping_evict_folio(folio->mapping, folio))
5468 				poisonret = VM_FAULT_NOPAGE;
5469 			folio_unlock(folio);
5470 		}
5471 		folio_put(folio);
5472 		vmf->page = NULL;
5473 		return poisonret;
5474 	}
5475 
5476 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
5477 		folio_lock(folio);
5478 	else
5479 		VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
5480 
5481 	return ret;
5482 }
5483 
5484 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5485 static void deposit_prealloc_pte(struct vm_fault *vmf)
5486 {
5487 	struct vm_area_struct *vma = vmf->vma;
5488 
5489 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
5490 	/*
5491 	 * We are going to consume the prealloc table,
5492 	 * count that as nr_ptes.
5493 	 */
5494 	mm_inc_nr_ptes(vma->vm_mm);
5495 	vmf->prealloc_pte = NULL;
5496 }
5497 
5498 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
5499 {
5500 	struct vm_area_struct *vma = vmf->vma;
5501 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5502 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5503 	pmd_t entry;
5504 	vm_fault_t ret = VM_FAULT_FALLBACK;
5505 
5506 	/*
5507 	 * It is too late to allocate a small folio: we already have a large
5508 	 * folio in the pagecache. In particular, s390 KVM cannot tolerate any
5509 	 * PMD mappings, but PTE-mapped THPs are fine. So let's simply refuse any
5510 	 * PMD mappings if THPs are disabled. As we already have a THP,
5511 	 * behave as if we are forcing a collapse.
5512 	 */
5513 	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags,
5514 						     /* forced_collapse=*/ true))
5515 		return ret;
5516 
5517 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
5518 		return ret;
5519 
5520 	if (!is_pmd_order(folio_order(folio)))
5521 		return ret;
5522 	page = &folio->page;
5523 
5524 	/*
5525 	 * Just back off if any subpage of a THP is corrupted, otherwise
5526 	 * the corrupted page may be mapped by PMD silently to escape the
5527 	 * check.  This kind of THP can only be PTE mapped.  Access to
5528 	 * the corrupted subpage should trigger SIGBUS as expected.
5529 	 */
5530 	if (unlikely(folio_test_has_hwpoisoned(folio)))
5531 		return ret;
5532 
5533 	/*
5534 	 * Archs like ppc64 need additional space to store information
5535 	 * related to pte entry. Use the preallocated table for that.
5536 	 */
5537 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
5538 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5539 		if (!vmf->prealloc_pte)
5540 			return VM_FAULT_OOM;
5541 	}
5542 
5543 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
5544 	if (unlikely(!pmd_none(*vmf->pmd)))
5545 		goto out;
5546 
5547 	flush_icache_pages(vma, page, HPAGE_PMD_NR);
5548 
5549 	entry = folio_mk_pmd(folio, vma->vm_page_prot);
5550 	if (write)
5551 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
5552 
5553 	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
5554 	folio_add_file_rmap_pmd(folio, page, vma);
5555 
5556 	/*
5557 	 * deposit and withdraw with pmd lock held
5558 	 */
5559 	if (arch_needs_pgtable_deposit())
5560 		deposit_prealloc_pte(vmf);
5561 
5562 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
5563 
5564 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
5565 
5566 	/* fault is handled */
5567 	ret = 0;
5568 	count_vm_event(THP_FILE_MAPPED);
5569 out:
5570 	spin_unlock(vmf->ptl);
5571 	return ret;
5572 }
5573 #else
5574 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
5575 {
5576 	return VM_FAULT_FALLBACK;
5577 }
5578 #endif
5579 
5580 /**
5581  * set_pte_range - Set a range of PTEs to point to pages in a folio.
5582  * @vmf: Fault description.
5583  * @folio: The folio that contains @page.
5584  * @page: The first page to create a PTE for.
5585  * @nr: The number of PTEs to create.
5586  * @addr: The first address to create a PTE for.
5587  * @addr: The first address to create a PTE for.
 */
5588 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
5589 		struct page *page, unsigned int nr, unsigned long addr)
5590 {
5591 	struct vm_area_struct *vma = vmf->vma;
5592 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5593 	bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
5594 	pte_t entry;
5595 
5596 	flush_icache_pages(vma, page, nr);
5597 	entry = mk_pte(page, vma->vm_page_prot);
5598 
5599 	if (prefault && arch_wants_old_prefaulted_pte())
5600 		entry = pte_mkold(entry);
5601 	else
5602 		entry = pte_sw_mkyoung(entry);
5603 
5604 	if (write)
5605 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5606 	else if (pte_write(entry) && folio_test_dirty(folio))
5607 		entry = pte_mkdirty(entry);
5608 	if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
5609 		entry = pte_mkuffd_wp(entry);
5610 	/* copy-on-write page */
5611 	if (write && !(vma->vm_flags & VM_SHARED)) {
5612 		VM_BUG_ON_FOLIO(nr != 1, folio);
5613 		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5614 		folio_add_lru_vma(folio, vma);
5615 	} else {
5616 		folio_add_file_rmap_ptes(folio, page, nr, vma);
5617 	}
5618 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
5619 
5620 	/* no need to invalidate: a not-present page won't be cached */
5621 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
5622 }
5623 
5624 static bool vmf_pte_changed(struct vm_fault *vmf)
5625 {
5626 	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
5627 		return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
5628 
5629 	return !pte_none(ptep_get(vmf->pte));
5630 }
5631 
5632 /**
5633  * finish_fault - finish page fault once we have prepared the page to fault
5634  *
5635  * @vmf: structure describing the fault
5636  *
5637  * This function handles all that is needed to finish a page fault once the
5638  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5639  * given page, adds reverse page mapping, handles memcg charges and LRU
5640  * addition.
5641  *
5642  * The function expects the page to be locked and on success it consumes a
5643  * reference of a page being mapped (for the PTE which maps it).
5644  *
5645  * Return: %0 on success, %VM_FAULT_ code in case of error.
5646  */
5647 vm_fault_t finish_fault(struct vm_fault *vmf)
5648 {
5649 	struct vm_area_struct *vma = vmf->vma;
5650 	struct page *page;
5651 	struct folio *folio;
5652 	vm_fault_t ret;
5653 	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
5654 		      !(vma->vm_flags & VM_SHARED);
5655 	int type, nr_pages;
5656 	unsigned long addr;
5657 	bool needs_fallback = false;
5658 
5659 fallback:
5660 	addr = vmf->address;
5661 
5662 	/* Did we COW the page? */
5663 	if (is_cow)
5664 		page = vmf->cow_page;
5665 	else
5666 		page = vmf->page;
5667 
5668 	folio = page_folio(page);
5669 	/*
5670 	 * check even for read faults because we might have lost our CoWed
5671 	 * page
5672 	 */
5673 	if (!(vma->vm_flags & VM_SHARED)) {
5674 		ret = check_stable_address_space(vma->vm_mm);
5675 		if (ret)
5676 			return ret;
5677 	}
5678 
5679 	if (!needs_fallback && vma->vm_file) {
5680 		struct address_space *mapping = vma->vm_file->f_mapping;
5681 		pgoff_t file_end;
5682 
5683 		file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
5684 
5685 		/*
5686 		 * Do not allow mapping with PTEs beyond i_size, nor with a PMD
5687 		 * across i_size, to preserve SIGBUS semantics.
5688 		 *
5689 		 * Make an exception for shmem/tmpfs, which has for a long time
5690 		 * intentionally been mapped with PMDs across i_size.
5691 		 */
5692 		needs_fallback = !shmem_mapping(mapping) &&
5693 			file_end < folio_next_index(folio);
5694 	}
5695 
5696 	if (pmd_none(*vmf->pmd)) {
5697 		if (!needs_fallback && folio_test_pmd_mappable(folio)) {
5698 			ret = do_set_pmd(vmf, folio, page);
5699 			if (ret != VM_FAULT_FALLBACK)
5700 				return ret;
5701 		}
5702 
5703 		if (vmf->prealloc_pte)
5704 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
5705 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
5706 			return VM_FAULT_OOM;
5707 	}
5708 
5709 	nr_pages = folio_nr_pages(folio);
5710 
5711 	/* Using per-page fault to maintain the uffd semantics */
5712 	if (unlikely(userfaultfd_armed(vma)) || unlikely(needs_fallback)) {
5713 		nr_pages = 1;
5714 	} else if (nr_pages > 1) {
5715 		pgoff_t idx = folio_page_idx(folio, page);
5716 		/* The page offset of vmf->address within the VMA. */
5717 		pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5718 		/* The index of the entry in the pagetable for fault page. */
5719 		pgoff_t pte_off = pte_index(vmf->address);
5720 
5721 		/*
5722 		 * Fall back to per-page fault in case the folio size in the
5723 		 * page cache is beyond the VMA limits or the PMD pagetable limits.
5724 		 */
5725 		if (unlikely(vma_off < idx ||
5726 			    vma_off + (nr_pages - idx) > vma_pages(vma) ||
5727 			    pte_off < idx ||
5728 			    pte_off + (nr_pages - idx)  > PTRS_PER_PTE)) {
5729 			nr_pages = 1;
5730 		} else {
5731 			/* Now we can set mappings for the whole large folio. */
5732 			addr = vmf->address - idx * PAGE_SIZE;
5733 			page = &folio->page;
5734 		}
5735 	}
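
	/*
	 * For example (illustrative): for a 16-page folio whose third page
	 * (idx == 2) faulted, mapping the whole folio requires the VMA to
	 * cover at least two pages before the fault address and fourteen
	 * pages from it onwards, with the same window also fitting inside
	 * one page table; otherwise we fall back to the single faulting page.
	 */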
5736 
5737 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5738 				       addr, &vmf->ptl);
5739 	if (!vmf->pte)
5740 		return VM_FAULT_NOPAGE;
5741 
5742 	/* Re-check under ptl */
5743 	if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
5744 		update_mmu_tlb(vma, addr, vmf->pte);
5745 		ret = VM_FAULT_NOPAGE;
5746 		goto unlock;
5747 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5748 		needs_fallback = true;
5749 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5750 		goto fallback;
5751 	}
5752 
5753 	folio_ref_add(folio, nr_pages - 1);
5754 	set_pte_range(vmf, folio, page, nr_pages, addr);
5755 	type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
5756 	add_mm_counter(vma->vm_mm, type, nr_pages);
5757 	ret = 0;
5758 
5759 unlock:
5760 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5761 	return ret;
5762 }
5763 
5764 static unsigned long fault_around_pages __read_mostly =
5765 	65536 >> PAGE_SHIFT;
5766 
5767 #ifdef CONFIG_DEBUG_FS
5768 static int fault_around_bytes_get(void *data, u64 *val)
5769 {
5770 	*val = fault_around_pages << PAGE_SHIFT;
5771 	return 0;
5772 }
5773 
5774 /*
5775  * fault_around_bytes must be rounded down to the nearest page order as it's
5776  * what do_fault_around() expects to see.
5777  */
5778 static int fault_around_bytes_set(void *data, u64 val)
5779 {
5780 	if (val / PAGE_SIZE > PTRS_PER_PTE)
5781 		return -EINVAL;
5782 
5783 	/*
5784 	 * The minimum value is 1 page, however this results in no fault-around
5785 	 * at all. See should_fault_around().
5786 	 */
5787 	val = max(val, PAGE_SIZE);
5788 	fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
5789 
5790 	return 0;
5791 }
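
/*
 * Example (a sketch, assuming debugfs is mounted at /sys/kernel/debug and
 * 4KiB pages): written values are clamped to at least one page and rounded
 * down to a power of two, so
 *
 *	# echo 20000 > /sys/kernel/debug/fault_around_bytes
 *	# cat /sys/kernel/debug/fault_around_bytes
 *	16384
 *
 * while values above PTRS_PER_PTE * PAGE_SIZE are rejected with -EINVAL.
 */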
5792 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5793 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5794 
5795 static int __init fault_around_debugfs(void)
5796 {
5797 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
5798 				   &fault_around_bytes_fops);
5799 	return 0;
5800 }
5801 late_initcall(fault_around_debugfs);
5802 #endif
5803 
5804 /*
5805  * do_fault_around() tries to map a few pages around the fault address. The hope
5806  * is that the pages will be needed soon and this will lower the number of
5807  * faults to handle.
5808  *
5809  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5810  * not ready to be mapped: not up-to-date, locked, etc.
5811  *
5812  * This function doesn't cross VMA or page table boundaries, in order to call
5813  * map_pages() and acquire a PTE lock only once.
5814  *
5815  * fault_around_pages defines how many pages we'll try to map.
5816  * do_fault_around() expects it to be set to a power of two less than or equal
5817  * to PTRS_PER_PTE.
5818  *
5819  * The virtual address of the area that we map is naturally aligned to
5820  * fault_around_pages * PAGE_SIZE rounded down to the machine page size
5821  * (and therefore to page order).  This way it's easier to guarantee
5822  * that we don't cross page table boundaries.
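 *
 * Worked example (illustrative, assuming fault_around_pages == 16 and a
 * fault at pte_off == 100 well inside a large VMA): from_pte is clamped to
 * ALIGN_DOWN(100, 16) == 96 and to_pte to 96 + 16 - 1 == 111, so
 * ->map_pages() is asked to handle one naturally aligned 16-page window
 * around the faulting PTE.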
5823  */
5824 static vm_fault_t do_fault_around(struct vm_fault *vmf)
5825 {
5826 	pgoff_t nr_pages = READ_ONCE(fault_around_pages);
5827 	pgoff_t pte_off = pte_index(vmf->address);
5828 	/* The page offset of vmf->address within the VMA. */
5829 	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5830 	pgoff_t from_pte, to_pte;
5831 	vm_fault_t ret;
5832 
5833 	/* The PTE offset of the start address, clamped to the VMA. */
5834 	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
5835 		       pte_off - min(pte_off, vma_off));
5836 
5837 	/* The PTE offset of the end address, clamped to the VMA and PTE. */
5838 	to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
5839 		      pte_off + vma_pages(vmf->vma) - vma_off) - 1;
5840 
5841 	if (pmd_none(*vmf->pmd)) {
5842 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
5843 		if (!vmf->prealloc_pte)
5844 			return VM_FAULT_OOM;
5845 	}
5846 
5847 	rcu_read_lock();
5848 	ret = vmf->vma->vm_ops->map_pages(vmf,
5849 			vmf->pgoff + from_pte - pte_off,
5850 			vmf->pgoff + to_pte - pte_off);
5851 	rcu_read_unlock();
5852 
5853 	return ret;
5854 }
5855 
5856 /* Return true if we should do read fault-around, false otherwise */
5857 static inline bool should_fault_around(struct vm_fault *vmf)
5858 {
5859 	/* No ->map_pages?  No way to fault around... */
5860 	if (!vmf->vma->vm_ops->map_pages)
5861 		return false;
5862 
5863 	if (uffd_disable_fault_around(vmf->vma))
5864 		return false;
5865 
5866 	/* A single page implies no faulting 'around' at all. */
5867 	return fault_around_pages > 1;
5868 }
5869 
5870 static vm_fault_t do_read_fault(struct vm_fault *vmf)
5871 {
5872 	vm_fault_t ret = 0;
5873 	struct folio *folio;
5874 
5875 	/*
5876 	 * Let's call ->map_pages() first and use ->fault() as a fallback
5877 	 * if the page at that offset is not ready to be mapped (cold page
5878 	 * cache or something).
5879 	 */
5880 	if (should_fault_around(vmf)) {
5881 		ret = do_fault_around(vmf);
5882 		if (ret)
5883 			return ret;
5884 	}
5885 
5886 	ret = vmf_can_call_fault(vmf);
5887 	if (ret)
5888 		return ret;
5889 
5890 	ret = __do_fault(vmf);
5891 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5892 		return ret;
5893 
5894 	ret |= finish_fault(vmf);
5895 	folio = page_folio(vmf->page);
5896 	folio_unlock(folio);
5897 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5898 		folio_put(folio);
5899 	return ret;
5900 }
5901 
5902 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
5903 {
5904 	struct vm_area_struct *vma = vmf->vma;
5905 	struct folio *folio;
5906 	vm_fault_t ret;
5907 
5908 	ret = vmf_can_call_fault(vmf);
5909 	if (!ret)
5910 		ret = vmf_anon_prepare(vmf);
5911 	if (ret)
5912 		return ret;
5913 
5914 	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
5915 	if (!folio)
5916 		return VM_FAULT_OOM;
5917 
5918 	vmf->cow_page = &folio->page;
5919 
5920 	ret = __do_fault(vmf);
5921 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5922 		goto uncharge_out;
5923 	if (ret & VM_FAULT_DONE_COW)
5924 		return ret;
5925 
5926 	if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
5927 		ret = VM_FAULT_HWPOISON;
5928 		goto unlock;
5929 	}
5930 	__folio_mark_uptodate(folio);
5931 
5932 	ret |= finish_fault(vmf);
5933 unlock:
5934 	unlock_page(vmf->page);
5935 	put_page(vmf->page);
5936 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5937 		goto uncharge_out;
5938 	return ret;
5939 uncharge_out:
5940 	folio_put(folio);
5941 	return ret;
5942 }
5943 
5944 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5945 {
5946 	struct vm_area_struct *vma = vmf->vma;
5947 	vm_fault_t ret, tmp;
5948 	struct folio *folio;
5949 
5950 	ret = vmf_can_call_fault(vmf);
5951 	if (ret)
5952 		return ret;
5953 
5954 	ret = __do_fault(vmf);
5955 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5956 		return ret;
5957 
5958 	folio = page_folio(vmf->page);
5959 
5960 	/*
5961 	 * Check if the backing address space wants to know that the page is
5962 	 * about to become writable
5963 	 */
5964 	if (vma->vm_ops->page_mkwrite) {
5965 		folio_unlock(folio);
5966 		tmp = do_page_mkwrite(vmf, folio);
5967 		if (unlikely(!tmp ||
5968 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5969 			folio_put(folio);
5970 			return tmp;
5971 		}
5972 	}
5973 
5974 	ret |= finish_fault(vmf);
5975 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5976 					VM_FAULT_RETRY))) {
5977 		folio_unlock(folio);
5978 		folio_put(folio);
5979 		return ret;
5980 	}
5981 
5982 	ret |= fault_dirty_shared_page(vmf);
5983 	return ret;
5984 }
5985 
5986 /*
5987  * We enter with non-exclusive mmap_lock (to exclude vma changes,
5988  * but allow concurrent faults).
5989  * The mmap_lock may have been released depending on flags and our
5990  * return value.  See filemap_fault() and __folio_lock_or_retry().
5991  * If mmap_lock is released, vma may become invalid (for example
5992  * by other thread calling munmap()).
5993  */
5994 static vm_fault_t do_fault(struct vm_fault *vmf)
5995 {
5996 	struct vm_area_struct *vma = vmf->vma;
5997 	struct mm_struct *vm_mm = vma->vm_mm;
5998 	vm_fault_t ret;
5999 
6000 	/*
6001 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
6002 	 */
6003 	if (!vma->vm_ops->fault) {
6004 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
6005 					       vmf->address, &vmf->ptl);
6006 		if (unlikely(!vmf->pte))
6007 			ret = VM_FAULT_SIGBUS;
6008 		else {
6009 			/*
6010 			 * Make sure this is not a temporary clearing of pte
6011 			 * by holding ptl and checking again. A R/M/W update
6012 			 * of pte involves: taking the ptl, clearing the pte so
6013 			 * that there is no concurrent modification by hardware,
6014 			 * followed by an update.
6015 			 */
6016 			if (unlikely(pte_none(ptep_get(vmf->pte))))
6017 				ret = VM_FAULT_SIGBUS;
6018 			else
6019 				ret = VM_FAULT_NOPAGE;
6020 
6021 			pte_unmap_unlock(vmf->pte, vmf->ptl);
6022 		}
6023 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
6024 		ret = do_read_fault(vmf);
6025 	else if (!(vma->vm_flags & VM_SHARED))
6026 		ret = do_cow_fault(vmf);
6027 	else
6028 		ret = do_shared_fault(vmf);
6029 
6030 	/* preallocated pagetable is unused: free it */
6031 	if (vmf->prealloc_pte) {
6032 		pte_free(vm_mm, vmf->prealloc_pte);
6033 		vmf->prealloc_pte = NULL;
6034 	}
6035 	return ret;
6036 }
6037 
6038 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
6039 		      unsigned long addr, int *flags,
6040 		      bool writable, int *last_cpupid)
6041 {
6042 	struct vm_area_struct *vma = vmf->vma;
6043 
6044 	/*
6045 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
6046 	 * much anyway since they can be in shared cache state. This misses
6047 	 * the case where a mapping is writable but the process never writes
6048 	 * to it, pte_write gets cleared during protection updates, and
6049 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
6050 	 * background writeback, dirty balancing and application behaviour.
6051 	 */
6052 	if (!writable)
6053 		*flags |= TNF_NO_GROUP;
6054 
6055 	/*
6056 	 * Flag if the folio is shared between multiple address spaces. This
6057 	 * is later used when determining whether to group tasks together
6058 	 */
6059 	if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
6060 		*flags |= TNF_SHARED;
6061 	/*
6062 	 * In memory tiering mode, the cpupid of a slow memory page is used
6063 	 * to record the page access time, so use the default value.
6064 	 */
6065 	if (folio_use_access_time(folio))
6066 		*last_cpupid = (-1 & LAST_CPUPID_MASK);
6067 	else
6068 		*last_cpupid = folio_last_cpupid(folio);
6069 
6070 	/* Record the current PID accessing VMA */
6071 	vma_set_access_pid_bit(vma);
6072 
6073 	count_vm_numa_event(NUMA_HINT_FAULTS);
6074 #ifdef CONFIG_NUMA_BALANCING
6075 	count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
6076 #endif
6077 	if (folio_nid(folio) == numa_node_id()) {
6078 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
6079 		*flags |= TNF_FAULT_LOCAL;
6080 	}
6081 
6082 	return mpol_misplaced(folio, vmf, addr);
6083 }
6084 
6085 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
6086 					unsigned long fault_addr, pte_t *fault_pte,
6087 					bool writable)
6088 {
6089 	pte_t pte, old_pte;
6090 
6091 	old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
6092 	pte = pte_modify(old_pte, vma->vm_page_prot);
6093 	pte = pte_mkyoung(pte);
6094 	if (writable)
6095 		pte = pte_mkwrite(pte, vma);
6096 	ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
6097 	update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
6098 }
6099 
6100 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
6101 				       struct folio *folio, pte_t fault_pte,
6102 				       bool ignore_writable, bool pte_write_upgrade)
6103 {
6104 	int nr = pte_pfn(fault_pte) - folio_pfn(folio);
6105 	unsigned long start, end, addr = vmf->address;
6106 	unsigned long addr_start = addr - (nr << PAGE_SHIFT);
6107 	unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
6108 	pte_t *start_ptep;
6109 
6110 	/* Stay within the VMA and within the page table. */
6111 	start = max3(addr_start, pt_start, vma->vm_start);
6112 	end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
6113 		   vma->vm_end);
6114 	start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
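
	/*
	 * For example (illustrative): if the fifth page of a 16-page folio
	 * faulted at addr, addr_start points four pages back at the VA of
	 * the folio's first page, and start/end clamp that folio-sized
	 * window to the VMA and to the PMD-sized page table covering addr.
	 */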
6115 
6116 	/* Restore all PTEs' mapping of the large folio */
6117 	for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
6118 		pte_t ptent = ptep_get(start_ptep);
6119 		bool writable = false;
6120 
6121 		if (!pte_present(ptent) || !pte_protnone(ptent))
6122 			continue;
6123 
6124 		if (pfn_folio(pte_pfn(ptent)) != folio)
6125 			continue;
6126 
6127 		if (!ignore_writable) {
6128 			ptent = pte_modify(ptent, vma->vm_page_prot);
6129 			writable = pte_write(ptent);
6130 			if (!writable && pte_write_upgrade &&
6131 			    can_change_pte_writable(vma, addr, ptent))
6132 				writable = true;
6133 		}
6134 
6135 		numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
6136 	}
6137 }
6138 
6139 static vm_fault_t do_numa_page(struct vm_fault *vmf)
6140 {
6141 	struct vm_area_struct *vma = vmf->vma;
6142 	struct folio *folio = NULL;
6143 	int nid = NUMA_NO_NODE;
6144 	bool writable = false, ignore_writable = false;
6145 	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
6146 	int last_cpupid;
6147 	int target_nid;
6148 	pte_t pte, old_pte;
6149 	int flags = 0, nr_pages;
6150 
6151 	/*
6152 	 * The pte cannot be used safely until we verify, while holding the page
6153 	 * table lock, that its contents have not changed during fault handling.
6154 	 */
6155 	spin_lock(vmf->ptl);
6156 	/* Read the live PTE from the page tables: */
6157 	old_pte = ptep_get(vmf->pte);
6158 
6159 	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
6160 		pte_unmap_unlock(vmf->pte, vmf->ptl);
6161 		return 0;
6162 	}
6163 
6164 	pte = pte_modify(old_pte, vma->vm_page_prot);
6165 
6166 	/*
6167 	 * Detect now whether the PTE could be writable; this information
6168 	 * is only valid while holding the PT lock.
6169 	 */
6170 	writable = pte_write(pte);
6171 	if (!writable && pte_write_upgrade &&
6172 	    can_change_pte_writable(vma, vmf->address, pte))
6173 		writable = true;
6174 
6175 	folio = vm_normal_folio(vma, vmf->address, pte);
6176 	if (!folio || folio_is_zone_device(folio))
6177 		goto out_map;
6178 
6179 	nid = folio_nid(folio);
6180 	nr_pages = folio_nr_pages(folio);
6181 
6182 	target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
6183 					writable, &last_cpupid);
6184 	if (target_nid == NUMA_NO_NODE)
6185 		goto out_map;
6186 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
6187 		flags |= TNF_MIGRATE_FAIL;
6188 		goto out_map;
6189 	}
6190 	/* The folio is isolated and isolation code holds a folio reference. */
6191 	pte_unmap_unlock(vmf->pte, vmf->ptl);
6192 	writable = false;
6193 	ignore_writable = true;
6194 
6195 	/* Migrate to the requested node */
6196 	if (!migrate_misplaced_folio(folio, target_nid)) {
6197 		nid = target_nid;
6198 		flags |= TNF_MIGRATED;
6199 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
6200 		return 0;
6201 	}
6202 
6203 	flags |= TNF_MIGRATE_FAIL;
6204 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
6205 				       vmf->address, &vmf->ptl);
6206 	if (unlikely(!vmf->pte))
6207 		return 0;
6208 	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
6209 		pte_unmap_unlock(vmf->pte, vmf->ptl);
6210 		return 0;
6211 	}
6212 out_map:
6213 	/*
6214 	 * Make it present again. Depending on how the arch implements
6215 	 * non-accessible ptes, some may allow access by kernel mode.
6216 	 */
6217 	if (folio && folio_test_large(folio))
6218 		numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
6219 					   pte_write_upgrade);
6220 	else
6221 		numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
6222 					    writable);
6223 	pte_unmap_unlock(vmf->pte, vmf->ptl);
6224 
6225 	if (nid != NUMA_NO_NODE)
6226 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
6227 	return 0;
6228 }
6229 
6230 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
6231 {
6232 	struct vm_area_struct *vma = vmf->vma;
6233 	if (vma_is_anonymous(vma))
6234 		return do_huge_pmd_anonymous_page(vmf);
6235 	if (vma->vm_ops->huge_fault)
6236 		return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
6237 	return VM_FAULT_FALLBACK;
6238 }
6239 
6240 /* `inline' is required to avoid gcc 4.1.2 build error */
6241 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
6242 {
6243 	struct vm_area_struct *vma = vmf->vma;
6244 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
6245 	vm_fault_t ret;
6246 
6247 	if (vma_is_anonymous(vma)) {
6248 		if (likely(!unshare) &&
6249 		    userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
6250 			if (userfaultfd_wp_async(vmf->vma))
6251 				goto split;
6252 			return handle_userfault(vmf, VM_UFFD_WP);
6253 		}
6254 		return do_huge_pmd_wp_page(vmf);
6255 	}
6256 
6257 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
6258 		if (vma->vm_ops->huge_fault) {
6259 			ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
6260 			if (!(ret & VM_FAULT_FALLBACK))
6261 				return ret;
6262 		}
6263 	}
6264 
6265 split:
6266 	/* COW or write-notify handled on pte level: split pmd. */
6267 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false);
6268 
6269 	return VM_FAULT_FALLBACK;
6270 }
6271 
6272 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
6273 {
6274 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
6275 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
6276 	struct vm_area_struct *vma = vmf->vma;
6277 	/* No support for anonymous transparent PUD pages yet */
6278 	if (vma_is_anonymous(vma))
6279 		return VM_FAULT_FALLBACK;
6280 	if (vma->vm_ops->huge_fault)
6281 		return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
6282 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
6283 	return VM_FAULT_FALLBACK;
6284 }
6285 
6286 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
6287 {
6288 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
6289 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
6290 	struct vm_area_struct *vma = vmf->vma;
6291 	vm_fault_t ret;
6292 
6293 	/* No support for anonymous transparent PUD pages yet */
6294 	if (vma_is_anonymous(vma))
6295 		goto split;
6296 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
6297 		if (vma->vm_ops->huge_fault) {
6298 			ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
6299 			if (!(ret & VM_FAULT_FALLBACK))
6300 				return ret;
6301 		}
6302 	}
6303 split:
6304 	/* COW or write-notify not handled on PUD level: split pud. */
6305 	__split_huge_pud(vma, vmf->pud, vmf->address);
6306 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
6307 	return VM_FAULT_FALLBACK;
6308 }
6309 
6310 /*
6311  * The page faults may be spurious because of the racy access to the
6312  * page table.  For example, a non-populated virtual page is accessed
6313  * on 2 CPUs simultaneously, thus the page faults are triggered on
6314  * both CPUs.  However, it's possible that one CPU (say CPU A) cannot
6315  * find the reason for the page fault if the other CPU (say CPU B) has
6316  * changed the page table before the PTE is checked on CPU A.  Most of
6317  * the time, the spurious page faults can be ignored safely.  However,
6318  * if the page fault is for the write access, it's possible that a
6319  * stale read-only TLB entry exists in the local CPU and needs to be
6320  * flushed on some architectures.  This is called the spurious page
6321  * fault fixing.
6322  *
6323  * Note: flush_tlb_fix_spurious_fault() is defined as flush_tlb_page()
6324  * by default and used as such on most architectures, while
6325  * flush_tlb_fix_spurious_fault_pmd() is defined as a NOP by default
6326  * and likewise used as such on most architectures.
6327  */
6328 static void fix_spurious_fault(struct vm_fault *vmf,
6329 			       enum pgtable_level ptlevel)
6330 {
6331 	/* Skip spurious TLB flush for retried page fault */
6332 	if (vmf->flags & FAULT_FLAG_TRIED)
6333 		return;
6334 	/*
6335 	 * This is needed only for protection faults but the arch code
6336 	 * is not yet telling us if this is a protection fault or not.
6337 	 * This still avoids useless tlb flushes for .text page faults
6338 	 * with threads.
6339 	 */
6340 	if (vmf->flags & FAULT_FLAG_WRITE) {
6341 		if (ptlevel == PGTABLE_LEVEL_PTE)
6342 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
6343 						     vmf->pte);
6344 		else
6345 			flush_tlb_fix_spurious_fault_pmd(vmf->vma, vmf->address,
6346 							 vmf->pmd);
6347 	}
6348 }
6349 /*
6350  * These routines also need to handle stuff like marking pages dirty
6351  * and/or accessed for architectures that don't do it in hardware (most
6352  * RISC architectures).  The early dirtying is also good on the i386.
6353  *
6354  * There is also a hook called "update_mmu_cache()" that architectures
6355  * with external mmu caches can use to update those (ie the Sparc or
6356  * PowerPC hashed page tables that act as extended TLBs).
6357  *
6358  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
6359  * concurrent faults).
6360  *
6361  * The mmap_lock may have been released depending on flags and our return value.
6362  * See filemap_fault() and __folio_lock_or_retry().
6363  */
6364 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
6365 {
6366 	pte_t entry;
6367 
6368 	if (unlikely(pmd_none(*vmf->pmd))) {
6369 		/*
6370 		 * Leave __pte_alloc() until later: because vm_ops->fault may
6371 		 * want to allocate huge page, and if we expose page table
6372 		 * for an instant, it will be difficult to retract from
6373 		 * concurrent faults and from rmap lookups.
6374 		 */
6375 		vmf->pte = NULL;
6376 		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
6377 	} else {
6378 		pmd_t dummy_pmdval;
6379 
6380 		/*
6381 		 * A regular pmd is established and it can't morph into a huge
6382 		 * pmd by anon khugepaged, since that takes mmap_lock in write
6383 		 * mode; but shmem or file collapse to THP could still morph
6384 		 * it into a huge pmd: just retry later if so.
6385 		 *
6386 		 * Use the maywrite version to indicate that vmf->pte may be
6387 		 * modified, but since we will use pte_same() to detect the
6388 		 * change of the !pte_none() entry, there is no need to recheck
6389 		 * the pmdval. Here we choose to pass a dummy variable instead
6390 		 * of NULL, which helps new users think about why this place is
6391 		 * special.
6392 		 */
6393 		vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
6394 						    vmf->address, &dummy_pmdval,
6395 						    &vmf->ptl);
6396 		if (unlikely(!vmf->pte))
6397 			return 0;
6398 		vmf->orig_pte = ptep_get_lockless(vmf->pte);
6399 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
6400 
6401 		if (pte_none(vmf->orig_pte)) {
6402 			pte_unmap(vmf->pte);
6403 			vmf->pte = NULL;
6404 		}
6405 	}
6406 
6407 	if (!vmf->pte)
6408 		return do_pte_missing(vmf);
6409 
6410 	if (!pte_present(vmf->orig_pte))
6411 		return do_swap_page(vmf);
6412 
6413 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
6414 		return do_numa_page(vmf);
6415 
6416 	spin_lock(vmf->ptl);
6417 	entry = vmf->orig_pte;
6418 	if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
6419 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
6420 		goto unlock;
6421 	}
6422 	if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6423 		if (!pte_write(entry))
6424 			return do_wp_page(vmf);
6425 		else if (likely(vmf->flags & FAULT_FLAG_WRITE))
6426 			entry = pte_mkdirty(entry);
6427 	}
6428 	entry = pte_mkyoung(entry);
6429 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
6430 				vmf->flags & FAULT_FLAG_WRITE))
6431 		update_mmu_cache_range(vmf, vmf->vma, vmf->address,
6432 				vmf->pte, 1);
6433 	else
6434 		fix_spurious_fault(vmf, PGTABLE_LEVEL_PTE);
6435 unlock:
6436 	pte_unmap_unlock(vmf->pte, vmf->ptl);
6437 	return 0;
6438 }
6439 
6440 /*
6441  * On entry, we hold either the VMA lock or the mmap_lock
6442  * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in
6443  * the result, the mmap_lock is not held on exit.  See filemap_fault()
6444  * and __folio_lock_or_retry().
6445  */
6446 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
6447 		unsigned long address, unsigned int flags)
6448 {
6449 	struct vm_fault vmf = {
6450 		.vma = vma,
6451 		.address = address & PAGE_MASK,
6452 		.real_address = address,
6453 		.flags = flags,
6454 		.pgoff = linear_page_index(vma, address),
6455 		.gfp_mask = __get_fault_gfp_mask(vma),
6456 	};
6457 	struct mm_struct *mm = vma->vm_mm;
6458 	vm_flags_t vm_flags = vma->vm_flags;
6459 	pgd_t *pgd;
6460 	p4d_t *p4d;
6461 	vm_fault_t ret;
6462 
6463 	pgd = pgd_offset(mm, address);
6464 	p4d = p4d_alloc(mm, pgd, address);
6465 	if (!p4d)
6466 		return VM_FAULT_OOM;
6467 
6468 	vmf.pud = pud_alloc(mm, p4d, address);
6469 	if (!vmf.pud)
6470 		return VM_FAULT_OOM;
6471 retry_pud:
6472 	if (pud_none(*vmf.pud) &&
6473 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PUD_ORDER)) {
6474 		ret = create_huge_pud(&vmf);
6475 		if (!(ret & VM_FAULT_FALLBACK))
6476 			return ret;
6477 	} else {
6478 		pud_t orig_pud = *vmf.pud;
6479 
6480 		barrier();
6481 		if (pud_trans_huge(orig_pud)) {
6482 
6483 			/*
6484 			 * TODO once we support anonymous PUDs: NUMA case and
6485 			 * FAULT_FLAG_UNSHARE handling.
6486 			 */
6487 			if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
6488 				ret = wp_huge_pud(&vmf, orig_pud);
6489 				if (!(ret & VM_FAULT_FALLBACK))
6490 					return ret;
6491 			} else {
6492 				huge_pud_set_accessed(&vmf, orig_pud);
6493 				return 0;
6494 			}
6495 		}
6496 	}
6497 
6498 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
6499 	if (!vmf.pmd)
6500 		return VM_FAULT_OOM;
6501 
6502 	/* Huge pud page fault raced with pmd_alloc? */
6503 	if (pud_trans_unstable(vmf.pud))
6504 		goto retry_pud;
6505 
6506 	if (pmd_none(*vmf.pmd) &&
6507 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
6508 		ret = create_huge_pmd(&vmf);
6509 		if (ret & VM_FAULT_FALLBACK)
6510 			goto fallback;
6511 		else
6512 			return ret;
6513 	}
6514 
6515 	vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
6516 	if (pmd_none(vmf.orig_pmd))
6517 		goto fallback;
6518 
6519 	if (unlikely(!pmd_present(vmf.orig_pmd))) {
6520 		if (pmd_is_device_private_entry(vmf.orig_pmd))
6521 			return do_huge_pmd_device_private(&vmf);
6522 
6523 		if (pmd_is_migration_entry(vmf.orig_pmd))
6524 			pmd_migration_entry_wait(mm, vmf.pmd);
6525 		return 0;
6526 	}
6527 	if (pmd_trans_huge(vmf.orig_pmd)) {
6528 		if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
6529 			return do_huge_pmd_numa_page(&vmf);
6530 
6531 		if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6532 		    !pmd_write(vmf.orig_pmd)) {
6533 			ret = wp_huge_pmd(&vmf);
6534 			if (!(ret & VM_FAULT_FALLBACK))
6535 				return ret;
6536 		} else {
6537 			vmf.ptl = pmd_lock(mm, vmf.pmd);
6538 			if (!huge_pmd_set_accessed(&vmf))
6539 				fix_spurious_fault(&vmf, PGTABLE_LEVEL_PMD);
6540 			spin_unlock(vmf.ptl);
6541 			return 0;
6542 		}
6543 	}
6544 
6545 fallback:
6546 	return handle_pte_fault(&vmf);
6547 }
6548 
6549 /**
6550  * mm_account_fault - Do page fault accounting
6551  * @mm: mm from which memcg should be extracted. It can be NULL.
6552  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
6553  *        of perf event counters, but we'll still do the per-task accounting to
6554  *        the task who triggered this page fault.
6555  * @address: the faulted address.
6556  * @flags: the fault flags.
6557  * @ret: the fault retcode.
6558  *
6559  * This will take care of most of the page fault accounting.  Meanwhile, it
6560  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
6561  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
6562  * still be in per-arch page fault handlers at the entry of page fault.
6563  */
6564 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
6565 				    unsigned long address, unsigned int flags,
6566 				    vm_fault_t ret)
6567 {
6568 	bool major;
6569 
6570 	/* Incomplete faults will be accounted upon completion. */
6571 	if (ret & VM_FAULT_RETRY)
6572 		return;
6573 
6574 	/*
6575 	 * To preserve the behavior of older kernels, PGFAULT counters record
6576 	 * both successful and failed faults, as opposed to perf counters,
6577 	 * which ignore failed cases.
6578 	 */
6579 	count_vm_event(PGFAULT);
6580 	count_memcg_event_mm(mm, PGFAULT);
6581 
6582 	/*
6583 	 * Do not account for unsuccessful faults (e.g. when the address wasn't
6584 	 * valid).  That includes arch_vma_access_permitted() failing before
6585 	 * reaching here. So this is not a "this many hardware page faults"
6586 	 * counter.  We should use the hw profiling for that.
6587 	 */
6588 	if (ret & VM_FAULT_ERROR)
6589 		return;
6590 
6591 	/*
6592 	 * We define the fault as a major fault when the final successful fault
6593 	 * is VM_FAULT_MAJOR, or if it was retried (which implies that we
6594 	 * couldn't handle it immediately previously).
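	 *
	 * For example, a file fault that had to drop the lock and start I/O
	 * to read the page in typically completes with VM_FAULT_MAJOR set,
	 * or is retried with FAULT_FLAG_TRIED; either way it is counted as
	 * a major fault.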
6595 	 */
6596 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
6597 
6598 	if (major)
6599 		current->maj_flt++;
6600 	else
6601 		current->min_flt++;
6602 
6603 	/*
6604 	 * If the fault is done for GUP, regs will be NULL.  We only do the
6605 	 * accounting for the per-thread fault counters of the task that
6606 	 * triggered the fault, and we skip the perf event updates.
6607 	 */
6608 	if (!regs)
6609 		return;
6610 
6611 	if (major)
6612 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
6613 	else
6614 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
6615 }
6616 
6617 #ifdef CONFIG_LRU_GEN
6618 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6619 {
6620 	/* the LRU algorithm only applies to accesses with recency */
6621 	current->in_lru_fault = vma_has_recency(vma);
6622 }
6623 
6624 static void lru_gen_exit_fault(void)
6625 {
6626 	current->in_lru_fault = false;
6627 }
6628 #else
6629 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6630 {
6631 }
6632 
6633 static void lru_gen_exit_fault(void)
6634 {
6635 }
6636 #endif /* CONFIG_LRU_GEN */
6637 
6638 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
6639 				       unsigned int *flags)
6640 {
6641 	if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
6642 		if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
6643 			return VM_FAULT_SIGSEGV;
6644 		/*
6645 		 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
6646 		 * just treat it like an ordinary read-fault otherwise.
6647 		 */
6648 		if (!is_cow_mapping(vma->vm_flags))
6649 			*flags &= ~FAULT_FLAG_UNSHARE;
6650 	} else if (*flags & FAULT_FLAG_WRITE) {
6651 		/* Write faults on read-only mappings are impossible ... */
6652 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
6653 			return VM_FAULT_SIGSEGV;
6654 		/* ... and FOLL_FORCE only applies to COW mappings. */
6655 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
6656 				 !is_cow_mapping(vma->vm_flags)))
6657 			return VM_FAULT_SIGSEGV;
6658 	}
6659 #ifdef CONFIG_PER_VMA_LOCK
6660 	/*
6661 	 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
6662 	 * the assumption that lock is dropped on VM_FAULT_RETRY.
6663 	 */
6664 	if (WARN_ON_ONCE((*flags &
6665 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
6666 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
6667 		return VM_FAULT_SIGSEGV;
6668 #endif
6669 
6670 	return 0;
6671 }
6672 
6673 /*
6674  * By the time we get here, we already hold either the VMA lock or the
6675  * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
6676  *
6677  * The mmap_lock may have been released depending on flags and our
6678  * return value.  See filemap_fault() and __folio_lock_or_retry().
6679  */
6680 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6681 			   unsigned int flags, struct pt_regs *regs)
6682 {
6683 	/* If the fault handler drops the mmap_lock, vma may be freed */
6684 	struct mm_struct *mm = vma->vm_mm;
6685 	vm_fault_t ret;
6686 	bool is_droppable;
6687 
6688 	__set_current_state(TASK_RUNNING);
6689 
6690 	ret = sanitize_fault_flags(vma, &flags);
6691 	if (ret)
6692 		goto out;
6693 
6694 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6695 					    flags & FAULT_FLAG_INSTRUCTION,
6696 					    flags & FAULT_FLAG_REMOTE)) {
6697 		ret = VM_FAULT_SIGSEGV;
6698 		goto out;
6699 	}
6700 
6701 	is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
6702 
6703 	/*
6704 	 * Enable the memcg OOM handling for faults triggered in user
6705 	 * space.  Kernel faults are handled more gracefully.
6706 	 */
6707 	if (flags & FAULT_FLAG_USER)
6708 		mem_cgroup_enter_user_fault();
6709 
6710 	lru_gen_enter_fault(vma);
6711 
6712 	if (unlikely(is_vm_hugetlb_page(vma)))
6713 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6714 	else
6715 		ret = __handle_mm_fault(vma, address, flags);
6716 
6717 	/*
6718 	 * Warning: It is no longer safe to dereference vma-> after this point,
6719 	 * because mmap_lock might have been dropped by __handle_mm_fault(), so
6720 	 * vma might be destroyed from underneath us.
6721 	 */
6722 
6723 	lru_gen_exit_fault();
6724 
6725 	/* If the mapping is droppable, then errors due to OOM aren't fatal. */
6726 	if (is_droppable)
6727 		ret &= ~VM_FAULT_OOM;
6728 
6729 	if (flags & FAULT_FLAG_USER) {
6730 		mem_cgroup_exit_user_fault();
6731 		/*
6732 		 * The task may have entered a memcg OOM situation but
6733 		 * if the allocation error was handled gracefully (no
6734 		 * VM_FAULT_OOM), there is no need to kill anything.
6735 		 * Just clean up the OOM state peacefully.
6736 		 */
6737 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6738 			mem_cgroup_oom_synchronize(false);
6739 	}
6740 out:
6741 	mm_account_fault(mm, regs, address, flags, ret);
6742 
6743 	return ret;
6744 }
6745 EXPORT_SYMBOL_GPL(handle_mm_fault);
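
/*
 * As a usage sketch (simplified from a typical architecture fault handler
 * such as arch/x86/mm/fault.c; signal delivery, error handling and fault
 * accounting omitted), handle_mm_fault() is driven by a retry loop:
 *
 *	retry:
 *		vma = lock_mm_and_find_vma(mm, address, regs);
 *		if (!vma)
 *			return;
 *		fault = handle_mm_fault(vma, address, flags, regs);
 *		if (fault & VM_FAULT_COMPLETED)
 *			return;		(mmap_lock already released)
 *		if (fault & VM_FAULT_RETRY) {
 *			flags |= FAULT_FLAG_TRIED;
 *			goto retry;	(mmap_lock was dropped for us)
 *		}
 *		mmap_read_unlock(mm);
 */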
6746 
6747 #ifndef __PAGETABLE_P4D_FOLDED
6748 /*
6749  * Allocate p4d page table.
6750  * We've already handled the fast-path in-line.
6751  */
6752 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6753 {
6754 	p4d_t *new = p4d_alloc_one(mm, address);
6755 	if (!new)
6756 		return -ENOMEM;
6757 
6758 	spin_lock(&mm->page_table_lock);
6759 	if (pgd_present(*pgd)) {	/* Another has populated it */
6760 		p4d_free(mm, new);
6761 	} else {
6762 		smp_wmb(); /* See comment in pmd_install() */
6763 		pgd_populate(mm, pgd, new);
6764 	}
6765 	spin_unlock(&mm->page_table_lock);
6766 	return 0;
6767 }
6768 #endif /* __PAGETABLE_P4D_FOLDED */
6769 
6770 #ifndef __PAGETABLE_PUD_FOLDED
6771 /*
6772  * Allocate page upper directory.
6773  * We've already handled the fast-path in-line.
6774  */
6775 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6776 {
6777 	pud_t *new = pud_alloc_one(mm, address);
6778 	if (!new)
6779 		return -ENOMEM;
6780 
6781 	spin_lock(&mm->page_table_lock);
6782 	if (!p4d_present(*p4d)) {
6783 		mm_inc_nr_puds(mm);
6784 		smp_wmb(); /* See comment in pmd_install() */
6785 		p4d_populate(mm, p4d, new);
6786 	} else	/* Another has populated it */
6787 		pud_free(mm, new);
6788 	spin_unlock(&mm->page_table_lock);
6789 	return 0;
6790 }
6791 #endif /* __PAGETABLE_PUD_FOLDED */
6792 
6793 #ifndef __PAGETABLE_PMD_FOLDED
6794 /*
6795  * Allocate page middle directory.
6796  * We've already handled the fast-path in-line.
6797  */
6798 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6799 {
6800 	spinlock_t *ptl;
6801 	pmd_t *new = pmd_alloc_one(mm, address);
6802 	if (!new)
6803 		return -ENOMEM;
6804 
6805 	ptl = pud_lock(mm, pud);
6806 	if (!pud_present(*pud)) {
6807 		mm_inc_nr_pmds(mm);
6808 		smp_wmb(); /* See comment in pmd_install() */
6809 		pud_populate(mm, pud, new);
6810 	} else {	/* Another has populated it */
6811 		pmd_free(mm, new);
6812 	}
6813 	spin_unlock(ptl);
6814 	return 0;
6815 }
6816 #endif /* __PAGETABLE_PMD_FOLDED */
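
/*
 * The fast path that the comments above refer to lives in the inline
 * allocators in <linux/mm.h>; pmd_alloc(), for instance, reads roughly
 * (paraphrased):
 *
 *	static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud,
 *				       unsigned long address)
 *	{
 *		return (unlikely(pud_none(*pud)) &&
 *			__pmd_alloc(mm, pud, address)) ?
 *				NULL : pmd_offset(pud, address);
 *	}
 *
 * i.e. __pmd_alloc() only runs when the pud entry is still empty, which is
 * why the slow paths above must recheck under the page table lock: another
 * thread may have populated the entry in the meantime.
 */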
6817 
6818 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
6819 				     spinlock_t *lock, pte_t *ptep,
6820 				     pgprot_t pgprot, unsigned long pfn_base,
6821 				     unsigned long addr_mask, bool writable,
6822 				     bool special)
6823 {
6824 	args->lock = lock;
6825 	args->ptep = ptep;
6826 	args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
6827 	args->addr_mask = addr_mask;
6828 	args->pgprot = pgprot;
6829 	args->writable = writable;
6830 	args->special = special;
6831 }
6832 
6833 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
6834 {
6835 #ifdef CONFIG_LOCKDEP
6836 	struct file *file = vma->vm_file;
6837 	struct address_space *mapping = file ? file->f_mapping : NULL;
6838 
6839 	if (mapping)
6840 		lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
6841 			       lockdep_is_held(&vma->vm_mm->mmap_lock));
6842 	else
6843 		lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
6844 #endif
6845 }
6846 
6847 /**
6848  * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
 * @args: Pointer to a struct follow_pfnmap_args
 *
 * The caller needs to set up args->vma and args->address to point to the
 * virtual address to be looked up.  On a successful return, the results
 * will be put into the other output fields.
 *
 * When the caller has finished using those fields, it must invoke
 * follow_pfnmap_end() to properly release the locks and resources taken
 * by the lookup.
 *
 * Between the start() and end() calls, the results in @args are valid
 * because the proper locks are held.  Once end() has been called, none of
 * the fields in @args may be accessed again.  Any further use of the
 * information after end() requires the caller to synchronize properly
 * with page table updates, otherwise it can create a security bug.
 *
 * If the PTE maps a refcounted page, callers are responsible for
 * protecting against invalidation with MMU notifiers; otherwise accessing
 * the PFN at a later point in time can trigger a use-after-free.
 *
 * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
 * must be taken for read and cannot be released before end() is invoked.
6873  *
6874  * This function must not be used to modify PTE content.
6875  *
6876  * Return: zero on success, negative otherwise.
6877  */
6878 int follow_pfnmap_start(struct follow_pfnmap_args *args)
6879 {
6880 	struct vm_area_struct *vma = args->vma;
6881 	unsigned long address = args->address;
6882 	struct mm_struct *mm = vma->vm_mm;
6883 	spinlock_t *lock;
6884 	pgd_t *pgdp;
6885 	p4d_t *p4dp, p4d;
6886 	pud_t *pudp, pud;
6887 	pmd_t *pmdp, pmd;
6888 	pte_t *ptep, pte;
6889 
6890 	pfnmap_lockdep_assert(vma);
6891 
6892 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6893 		goto out;
6894 
6895 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6896 		goto out;
6897 retry:
6898 	pgdp = pgd_offset(mm, address);
6899 	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
6900 		goto out;
6901 
6902 	p4dp = p4d_offset(pgdp, address);
6903 	p4d = p4dp_get(p4dp);
6904 	if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
6905 		goto out;
6906 
6907 	pudp = pud_offset(p4dp, address);
6908 	pud = pudp_get(pudp);
6909 	if (!pud_present(pud))
6910 		goto out;
6911 	if (pud_leaf(pud)) {
6912 		lock = pud_lock(mm, pudp);
6913 		pud = pudp_get(pudp);
6914 
6915 		if (unlikely(!pud_present(pud))) {
6916 			spin_unlock(lock);
6917 			goto out;
6918 		} else if (unlikely(!pud_leaf(pud))) {
6919 			spin_unlock(lock);
6920 			goto retry;
6921 		}
6922 		pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
6923 				  pud_pfn(pud), PUD_MASK, pud_write(pud),
6924 				  pud_special(pud));
6925 		return 0;
6926 	}
6927 
6928 	pmdp = pmd_offset(pudp, address);
6929 	pmd = pmdp_get_lockless(pmdp);
6930 	if (!pmd_present(pmd))
6931 		goto out;
6932 	if (pmd_leaf(pmd)) {
6933 		lock = pmd_lock(mm, pmdp);
6934 		pmd = pmdp_get(pmdp);
6935 
6936 		if (unlikely(!pmd_present(pmd))) {
6937 			spin_unlock(lock);
6938 			goto out;
6939 		} else if (unlikely(!pmd_leaf(pmd))) {
6940 			spin_unlock(lock);
6941 			goto retry;
6942 		}
6943 		pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
6944 				  pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
6945 				  pmd_special(pmd));
6946 		return 0;
6947 	}
6948 
6949 	ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
6950 	if (!ptep)
6951 		goto out;
6952 	pte = ptep_get(ptep);
6953 	if (!pte_present(pte))
6954 		goto unlock;
6955 	pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
6956 			  pte_pfn(pte), PAGE_MASK, pte_write(pte),
6957 			  pte_special(pte));
6958 	return 0;
6959 unlock:
6960 	pte_unmap_unlock(ptep, lock);
6961 out:
6962 	return -EINVAL;
6963 }
6964 EXPORT_SYMBOL_GPL(follow_pfnmap_start);
6965 
6966 /**
 * follow_pfnmap_end() - End a follow_pfnmap_start() process
 * @args: Pointer to a struct follow_pfnmap_args
 *
 * Must be paired with a previous follow_pfnmap_start().  See the start()
 * function above for more information.
6972  */
6973 void follow_pfnmap_end(struct follow_pfnmap_args *args)
6974 {
6975 	if (args->lock)
6976 		spin_unlock(args->lock);
6977 	if (args->ptep)
6978 		pte_unmap(args->ptep);
6979 }
6980 EXPORT_SYMBOL_GPL(follow_pfnmap_end);
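
/*
 * A minimal sketch of the start()/end() pairing (generic_access_phys()
 * below is a complete in-tree user):
 *
 *	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
 *
 *	if (follow_pfnmap_start(&args))
 *		return -EINVAL;
 *	pfn = args.pfn;			(consume the output fields ...)
 *	writable = args.writable;	(... while the locks are still held)
 *	follow_pfnmap_end(&args);
 */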
6981 
6982 #ifdef CONFIG_HAVE_IOREMAP_PROT
6983 /**
6984  * generic_access_phys - generic implementation for iomem mmap access
6985  * @vma: the vma to access
6986  * @addr: userspace address, not relative offset within @vma
6987  * @buf: buffer to read/write
6988  * @len: length of transfer
6989  * @write: set to FOLL_WRITE when writing, otherwise reading
6990  *
6991  * This is a generic implementation for &vm_operations_struct.access for an
6992  * iomem mapping. This callback is used by access_process_vm() when the @vma is
6993  * not page based.
6994  */
6995 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6996 			void *buf, int len, int write)
6997 {
6998 	resource_size_t phys_addr;
6999 	pgprot_t prot = __pgprot(0);
7000 	void __iomem *maddr;
7001 	int offset = offset_in_page(addr);
7002 	int ret = -EINVAL;
7003 	bool writable;
7004 	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
7005 
7006 retry:
7007 	if (follow_pfnmap_start(&args))
7008 		return -EINVAL;
7009 	prot = args.pgprot;
7010 	phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
7011 	writable = args.writable;
7012 	follow_pfnmap_end(&args);
7013 
7014 	if ((write & FOLL_WRITE) && !writable)
7015 		return -EINVAL;
7016 
7017 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
7018 	if (!maddr)
7019 		return -ENOMEM;
7020 
7021 	if (follow_pfnmap_start(&args))
7022 		goto out_unmap;
7023 
7024 	if ((pgprot_val(prot) != pgprot_val(args.pgprot)) ||
7025 	    (phys_addr != (args.pfn << PAGE_SHIFT)) ||
7026 	    (writable != args.writable)) {
7027 		follow_pfnmap_end(&args);
7028 		iounmap(maddr);
7029 		goto retry;
7030 	}
7031 
7032 	if (write)
7033 		memcpy_toio(maddr + offset, buf, len);
7034 	else
7035 		memcpy_fromio(buf, maddr + offset, len);
7036 	ret = len;
7037 	follow_pfnmap_end(&args);
7038 out_unmap:
7039 	iounmap(maddr);
7040 
7041 	return ret;
7042 }
7043 EXPORT_SYMBOL_GPL(generic_access_phys);
7044 #endif
7045 
7046 /*
7047  * Access another process' address space as given in mm.
7048  */
7049 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
7050 			      void *buf, int len, unsigned int gup_flags)
7051 {
7052 	void *old_buf = buf;
7053 	int write = gup_flags & FOLL_WRITE;
7054 
7055 	if (mmap_read_lock_killable(mm))
7056 		return 0;
7057 
7058 	/* Untag the address before looking up the VMA */
7059 	addr = untagged_addr_remote(mm, addr);
7060 
7061 	/* Avoid triggering the temporary warning in __get_user_pages */
7062 	if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
7063 		return 0;
7064 
7065 	/* ignore errors, just check how much was successfully transferred */
7066 	while (len) {
7067 		int bytes, offset;
7068 		void *maddr;
7069 		struct folio *folio;
7070 		struct vm_area_struct *vma = NULL;
7071 		struct page *page = get_user_page_vma_remote(mm, addr,
7072 							     gup_flags, &vma);
7073 
7074 		if (IS_ERR(page)) {
7075 			/* We might need to expand the stack to access it */
7076 			vma = vma_lookup(mm, addr);
7077 			if (!vma) {
7078 				vma = expand_stack(mm, addr);
7079 
7080 				/* mmap_lock was dropped on failure */
7081 				if (!vma)
7082 					return buf - old_buf;
7083 
7084 				/* Try again if stack expansion worked */
7085 				continue;
7086 			}
7087 
7088 			/*
7089 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
7090 			 * we can access using slightly different code.
7091 			 */
7092 			bytes = 0;
7093 #ifdef CONFIG_HAVE_IOREMAP_PROT
7094 			if (vma->vm_ops && vma->vm_ops->access)
7095 				bytes = vma->vm_ops->access(vma, addr, buf,
7096 							    len, write);
7097 #endif
7098 			if (bytes <= 0)
7099 				break;
7100 		} else {
7101 			folio = page_folio(page);
7102 			bytes = len;
7103 			offset = addr & (PAGE_SIZE-1);
7104 			if (bytes > PAGE_SIZE-offset)
7105 				bytes = PAGE_SIZE-offset;
7106 
7107 			maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
7108 			if (write) {
7109 				copy_to_user_page(vma, page, addr,
7110 						  maddr + offset, buf, bytes);
7111 				folio_mark_dirty_lock(folio);
7112 			} else {
7113 				copy_from_user_page(vma, page, addr,
7114 						    buf, maddr + offset, bytes);
7115 			}
7116 			folio_release_kmap(folio, maddr);
7117 		}
7118 		len -= bytes;
7119 		buf += bytes;
7120 		addr += bytes;
7121 	}
7122 	mmap_read_unlock(mm);
7123 
7124 	return buf - old_buf;
7125 }
7126 
7127 /**
7128  * access_remote_vm - access another process' address space
7129  * @mm:		the mm_struct of the target address space
7130  * @addr:	start address to access
7131  * @buf:	source or destination buffer
7132  * @len:	number of bytes to transfer
7133  * @gup_flags:	flags modifying lookup behaviour
7134  *
7135  * The caller must hold a reference on @mm.
7136  *
7137  * Return: number of bytes copied from source to destination.
7138  */
7139 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
7140 		void *buf, int len, unsigned int gup_flags)
7141 {
7142 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
7143 }
7144 
/*
 * Access another process' address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */
7150 int access_process_vm(struct task_struct *tsk, unsigned long addr,
7151 		void *buf, int len, unsigned int gup_flags)
7152 {
7153 	struct mm_struct *mm;
7154 	int ret;
7155 
7156 	mm = get_task_mm(tsk);
7157 	if (!mm)
7158 		return 0;
7159 
7160 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
7161 
7162 	mmput(mm);
7163 
7164 	return ret;
7165 }
7166 EXPORT_SYMBOL_GPL(access_process_vm);
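
/*
 * A typical user is ptrace: generic_ptrace_peekdata() in kernel/ptrace.c
 * reads one word from the tracee roughly like this (sketch):
 *
 *	unsigned long tmp;
 *	int copied;
 *
 *	copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
 *				   FOLL_FORCE);
 *	if (copied != sizeof(tmp))
 *		return -EIO;
 *	return put_user(tmp, (unsigned long __user *)data);
 */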
7167 
7168 #ifdef CONFIG_BPF_SYSCALL
7169 /*
7170  * Copy a string from another process's address space as given in mm.
 * If there is any error, return -EFAULT.
7172  */
7173 static int __copy_remote_vm_str(struct mm_struct *mm, unsigned long addr,
7174 				void *buf, int len, unsigned int gup_flags)
7175 {
7176 	void *old_buf = buf;
7177 	int err = 0;
7178 
7179 	*(char *)buf = '\0';
7180 
7181 	if (mmap_read_lock_killable(mm))
7182 		return -EFAULT;
7183 
7184 	addr = untagged_addr_remote(mm, addr);
7185 
7186 	/* Avoid triggering the temporary warning in __get_user_pages */
7187 	if (!vma_lookup(mm, addr)) {
7188 		err = -EFAULT;
7189 		goto out;
7190 	}
7191 
7192 	while (len) {
7193 		int bytes, offset, retval;
7194 		void *maddr;
7195 		struct folio *folio;
7196 		struct page *page;
7197 		struct vm_area_struct *vma = NULL;
7198 
7199 		page = get_user_page_vma_remote(mm, addr, gup_flags, &vma);
7200 		if (IS_ERR(page)) {
7201 			/*
7202 			 * Treat as a total failure for now until we decide how
7203 			 * to handle the CONFIG_HAVE_IOREMAP_PROT case and
7204 			 * stack expansion.
7205 			 */
7206 			*(char *)buf = '\0';
7207 			err = -EFAULT;
7208 			goto out;
7209 		}
7210 
7211 		folio = page_folio(page);
7212 		bytes = len;
7213 		offset = addr & (PAGE_SIZE - 1);
7214 		if (bytes > PAGE_SIZE - offset)
7215 			bytes = PAGE_SIZE - offset;
7216 
7217 		maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
7218 		retval = strscpy(buf, maddr + offset, bytes);
7219 		if (retval >= 0) {
7220 			/* Found the end of the string */
7221 			buf += retval;
7222 			folio_release_kmap(folio, maddr);
7223 			break;
7224 		}
7225 
7226 		buf += bytes - 1;
		/*
		 * Because strscpy() always NUL-terminates, we need to
		 * copy the last byte in the page separately if we are
		 * going to load more pages.
		 */
7232 		if (bytes != len) {
7233 			addr += bytes - 1;
7234 			copy_from_user_page(vma, page, addr, buf, maddr + (PAGE_SIZE - 1), 1);
7235 			buf += 1;
7236 			addr += 1;
7237 		}
7238 		len -= bytes;
7239 
7240 		folio_release_kmap(folio, maddr);
7241 	}
7242 
7243 out:
7244 	mmap_read_unlock(mm);
7245 	if (err)
7246 		return err;
7247 	return buf - old_buf;
7248 }
7249 
7250 /**
7251  * copy_remote_vm_str - copy a string from another process's address space.
7252  * @tsk:	the task of the target address space
7253  * @addr:	start address to read from
7254  * @buf:	destination buffer
7255  * @len:	number of bytes to copy
7256  * @gup_flags:	flags modifying lookup behaviour
7257  *
 * The caller must hold a reference on @tsk.
 *
 * Return: number of bytes copied from @addr (source) to @buf (destination),
 * not including the trailing NUL.  @buf is always guaranteed to be
 * NUL-terminated.  On any error, returns -EFAULT.
7263  */
7264 int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
7265 		       void *buf, int len, unsigned int gup_flags)
7266 {
7267 	struct mm_struct *mm;
7268 	int ret;
7269 
7270 	if (unlikely(len == 0))
7271 		return 0;
7272 
7273 	mm = get_task_mm(tsk);
7274 	if (!mm) {
7275 		*(char *)buf = '\0';
7276 		return -EFAULT;
7277 	}
7278 
7279 	ret = __copy_remote_vm_str(mm, addr, buf, len, gup_flags);
7280 
7281 	mmput(mm);
7282 
7283 	return ret;
7284 }
7285 EXPORT_SYMBOL_GPL(copy_remote_vm_str);
7286 #endif /* CONFIG_BPF_SYSCALL */
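
/*
 * A minimal usage sketch (all names hypothetical): copying a
 * NUL-terminated string, e.g. a filename, out of another task:
 *
 *	char name[256];
 *	long n;
 *
 *	n = copy_remote_vm_str(tsk, addr, name, sizeof(name), 0);
 *	if (n < 0)
 *		return n;	(-EFAULT on any error)
 *
 * On success, name holds at most sizeof(name) - 1 bytes and is always
 * NUL-terminated.
 */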
7287 
7288 /*
7289  * Print the name of a VMA.
7290  */
7291 void print_vma_addr(char *prefix, unsigned long ip)
7292 {
7293 	struct mm_struct *mm = current->mm;
7294 	struct vm_area_struct *vma;
7295 
	/*
	 * We might be running from an atomic context, so we cannot sleep.
	 */
7299 	if (!mmap_read_trylock(mm))
7300 		return;
7301 
7302 	vma = vma_lookup(mm, ip);
7303 	if (vma && vma->vm_file) {
7304 		struct file *f = vma->vm_file;
7305 		ip -= vma->vm_start;
7306 		ip += vma->vm_pgoff << PAGE_SHIFT;
7307 		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
7308 				vma->vm_start,
7309 				vma->vm_end - vma->vm_start);
7310 	}
7311 	mmap_read_unlock(mm);
7312 }
7313 
7314 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
7315 void __might_fault(const char *file, int line)
7316 {
7317 	if (pagefault_disabled())
7318 		return;
7319 	__might_sleep(file, line);
7320 	if (current->mm)
7321 		might_lock_read(&current->mm->mmap_lock);
7322 }
7323 EXPORT_SYMBOL(__might_fault);
7324 #endif
7325 
7326 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
7327 /*
7328  * Process all subpages of the specified huge page with the specified
7329  * operation.  The target subpage will be processed last to keep its
7330  * cache lines hot.
7331  */
7332 static inline int process_huge_page(
7333 	unsigned long addr_hint, unsigned int nr_pages,
7334 	int (*process_subpage)(unsigned long addr, int idx, void *arg),
7335 	void *arg)
7336 {
7337 	int i, n, base, l, ret;
7338 	unsigned long addr = addr_hint &
7339 		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
7340 
7341 	/* Process target subpage last to keep its cache lines hot */
7342 	might_sleep();
7343 	n = (addr_hint - addr) / PAGE_SIZE;
7344 	if (2 * n <= nr_pages) {
7345 		/* If target subpage in first half of huge page */
7346 		base = 0;
7347 		l = n;
7348 		/* Process subpages at the end of huge page */
7349 		for (i = nr_pages - 1; i >= 2 * n; i--) {
7350 			cond_resched();
7351 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
7352 			if (ret)
7353 				return ret;
7354 		}
7355 	} else {
7356 		/* If target subpage in second half of huge page */
7357 		base = nr_pages - 2 * (nr_pages - n);
7358 		l = nr_pages - n;
		/* Process subpages at the beginning of the huge page */
7360 		for (i = 0; i < base; i++) {
7361 			cond_resched();
7362 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
7363 			if (ret)
7364 				return ret;
7365 		}
7366 	}
7367 	/*
7368 	 * Process remaining subpages in left-right-left-right pattern
7369 	 * towards the target subpage
7370 	 */
7371 	for (i = 0; i < l; i++) {
7372 		int left_idx = base + i;
7373 		int right_idx = base + 2 * l - 1 - i;
7374 
7375 		cond_resched();
7376 		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
7377 		if (ret)
7378 			return ret;
7379 		cond_resched();
7380 		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
7381 		if (ret)
7382 			return ret;
7383 	}
7384 	return 0;
7385 }
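
/*
 * For example, with nr_pages == 8 and a target subpage n == 2, the first
 * loop above processes subpages 7, 6, 5, 4 and the left-right loop then
 * processes 0, 3, 1, 2: the target subpage comes last, and its neighbours
 * are touched immediately before it, keeping their cache lines hot.
 */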
7386 
7387 static void clear_contig_highpages(struct page *page, unsigned long addr,
7388 				   unsigned int nr_pages)
7389 {
7390 	unsigned int i, count;
7391 	/*
7392 	 * When clearing we want to operate on the largest extent possible to
	 * When clearing, we want to operate on the largest extent possible to
	 * allow for architecture-specific, extent-based optimizations.
	 *
	 * However, since clear_user_highpages() (and the underlying primitives
	 * clear_user_pages() and clear_pages()) do not call cond_resched(),
	 * limit the unit size when running under non-preemptible scheduling
	 * models.
7399 	const unsigned int unit = preempt_model_preemptible() ?
7400 				   nr_pages : PROCESS_PAGES_NON_PREEMPT_BATCH;
7401 
7402 	might_sleep();
7403 
7404 	for (i = 0; i < nr_pages; i += count) {
7405 		cond_resched();
7406 
7407 		count = min(unit, nr_pages - i);
7408 		clear_user_highpages(page + i, addr + i * PAGE_SIZE, count);
7409 	}
7410 }
7411 
7412 /*
7413  * When zeroing a folio, we want to differentiate between pages in the
7414  * vicinity of the faulting address where we have spatial and temporal
7415  * locality, and those far away where we don't.
7416  *
7417  * Use a radius of 2 for determining the local neighbourhood.
7418  */
7419 #define FOLIO_ZERO_LOCALITY_RADIUS	2
7420 
7421 /**
7422  * folio_zero_user - Zero a folio which will be mapped to userspace.
7423  * @folio: The folio to zero.
7424  * @addr_hint: The address accessed by the user or the base address.
7425  */
7426 void folio_zero_user(struct folio *folio, unsigned long addr_hint)
7427 {
7428 	const unsigned long base_addr = ALIGN_DOWN(addr_hint, folio_size(folio));
7429 	const long fault_idx = (addr_hint - base_addr) / PAGE_SIZE;
7430 	const struct range pg = DEFINE_RANGE(0, folio_nr_pages(folio) - 1);
7431 	const long radius = FOLIO_ZERO_LOCALITY_RADIUS;
7432 	struct range r[3];
7433 	int i;
7434 
7435 	/*
7436 	 * Faulting page and its immediate neighbourhood. Will be cleared at the
7437 	 * end to keep its cachelines hot.
7438 	 */
7439 	r[2] = DEFINE_RANGE(fault_idx - radius < (long)pg.start ? pg.start : fault_idx - radius,
7440 			    fault_idx + radius > (long)pg.end   ? pg.end   : fault_idx + radius);
7441 
7443 	/* Region to the left of the fault */
7444 	r[1] = DEFINE_RANGE(pg.start, r[2].start - 1);
7445 
7446 	/* Region to the right of the fault: always valid for the common fault_idx=0 case. */
7447 	r[0] = DEFINE_RANGE(r[2].end + 1, pg.end);
7448 
7449 	for (i = 0; i < ARRAY_SIZE(r); i++) {
7450 		const unsigned long addr = base_addr + r[i].start * PAGE_SIZE;
7451 		const long nr_pages = (long)range_len(&r[i]);
7452 		struct page *page = folio_page(folio, r[i].start);
7453 
7454 		if (nr_pages > 0)
7455 			clear_contig_highpages(page, addr, nr_pages);
7456 	}
7457 }
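
/*
 * For example, for a 16-page folio faulted in at its first page
 * (fault_idx == 0), the ranges come out as r[2] = [0, 2] (the local
 * neighbourhood), r[1] empty, and r[0] = [3, 15]: the loop zeroes the far
 * region first and the pages around the faulting address last, so their
 * cache lines stay hot.
 */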
7458 
7459 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
7460 				   unsigned long addr_hint,
7461 				   struct vm_area_struct *vma,
7462 				   unsigned int nr_pages)
7463 {
7464 	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
7465 	struct page *dst_page;
7466 	struct page *src_page;
7467 	int i;
7468 
7469 	for (i = 0; i < nr_pages; i++) {
7470 		dst_page = folio_page(dst, i);
7471 		src_page = folio_page(src, i);
7472 
7473 		cond_resched();
7474 		if (copy_mc_user_highpage(dst_page, src_page,
7475 					  addr + i*PAGE_SIZE, vma))
7476 			return -EHWPOISON;
7477 	}
7478 	return 0;
7479 }
7480 
7481 struct copy_subpage_arg {
7482 	struct folio *dst;
7483 	struct folio *src;
7484 	struct vm_area_struct *vma;
7485 };
7486 
7487 static int copy_subpage(unsigned long addr, int idx, void *arg)
7488 {
7489 	struct copy_subpage_arg *copy_arg = arg;
7490 	struct page *dst = folio_page(copy_arg->dst, idx);
7491 	struct page *src = folio_page(copy_arg->src, idx);
7492 
7493 	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
7494 		return -EHWPOISON;
7495 	return 0;
7496 }
7497 
7498 int copy_user_large_folio(struct folio *dst, struct folio *src,
7499 			  unsigned long addr_hint, struct vm_area_struct *vma)
7500 {
7501 	unsigned int nr_pages = folio_nr_pages(dst);
7502 	struct copy_subpage_arg arg = {
7503 		.dst = dst,
7504 		.src = src,
7505 		.vma = vma,
7506 	};
7507 
7508 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
7509 		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
7510 
7511 	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
7512 }
7513 
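/*
 * Like copy_from_user(), this returns the number of bytes that could not
 * be copied (0 on complete success): ret_val starts at the full folio
 * size and the bytes actually copied for each page are subtracted from
 * it.
 */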
7514 long copy_folio_from_user(struct folio *dst_folio,
7515 			   const void __user *usr_src,
7516 			   bool allow_pagefault)
7517 {
7518 	void *kaddr;
7519 	unsigned long i, rc = 0;
7520 	unsigned int nr_pages = folio_nr_pages(dst_folio);
7521 	unsigned long ret_val = nr_pages * PAGE_SIZE;
7522 	struct page *subpage;
7523 
7524 	for (i = 0; i < nr_pages; i++) {
7525 		subpage = folio_page(dst_folio, i);
7526 		kaddr = kmap_local_page(subpage);
7527 		if (!allow_pagefault)
7528 			pagefault_disable();
7529 		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
7530 		if (!allow_pagefault)
7531 			pagefault_enable();
7532 		kunmap_local(kaddr);
7533 
7534 		ret_val -= (PAGE_SIZE - rc);
7535 		if (rc)
7536 			break;
7537 
7538 		flush_dcache_page(subpage);
7539 
7540 		cond_resched();
7541 	}
7542 	return ret_val;
7543 }
7544 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
7545 
7546 #if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
7547 
7548 static struct kmem_cache *page_ptl_cachep;
7549 
7550 void __init ptlock_cache_init(void)
7551 {
7552 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
7553 			SLAB_PANIC, NULL);
7554 }
7555 
7556 bool ptlock_alloc(struct ptdesc *ptdesc)
7557 {
7558 	spinlock_t *ptl;
7559 
7560 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
7561 	if (!ptl)
7562 		return false;
7563 	ptdesc->ptl = ptl;
7564 	return true;
7565 }
7566 
7567 void ptlock_free(struct ptdesc *ptdesc)
7568 {
7569 	if (ptdesc->ptl)
7570 		kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
7571 }
7572 #endif
7573 
7574 void vma_pgtable_walk_begin(struct vm_area_struct *vma)
7575 {
7576 	if (is_vm_hugetlb_page(vma))
7577 		hugetlb_vma_lock_read(vma);
7578 }
7579 
7580 void vma_pgtable_walk_end(struct vm_area_struct *vma)
7581 {
7582 	if (is_vm_hugetlb_page(vma))
7583 		hugetlb_vma_unlock_read(vma);
7584 }
7585