xref: /linux/mm/memory.c (revision 409f45387c937145adeeeebc6d6032c2ec232b35)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/numa_balancing.h>
47 #include <linux/sched/task.h>
48 #include <linux/hugetlb.h>
49 #include <linux/mman.h>
50 #include <linux/swap.h>
51 #include <linux/highmem.h>
52 #include <linux/pagemap.h>
53 #include <linux/memremap.h>
54 #include <linux/kmsan.h>
55 #include <linux/ksm.h>
56 #include <linux/rmap.h>
57 #include <linux/export.h>
58 #include <linux/delayacct.h>
59 #include <linux/init.h>
60 #include <linux/pfn_t.h>
61 #include <linux/writeback.h>
62 #include <linux/memcontrol.h>
63 #include <linux/mmu_notifier.h>
64 #include <linux/swapops.h>
65 #include <linux/elf.h>
66 #include <linux/gfp.h>
67 #include <linux/migrate.h>
68 #include <linux/string.h>
69 #include <linux/memory-tiers.h>
70 #include <linux/debugfs.h>
71 #include <linux/userfaultfd_k.h>
72 #include <linux/dax.h>
73 #include <linux/oom.h>
74 #include <linux/numa.h>
75 #include <linux/perf_event.h>
76 #include <linux/ptrace.h>
77 #include <linux/vmalloc.h>
78 #include <linux/sched/sysctl.h>
79 #include <linux/fsnotify.h>
80 
81 #include <trace/events/kmem.h>
82 
83 #include <asm/io.h>
84 #include <asm/mmu_context.h>
85 #include <asm/pgalloc.h>
86 #include <linux/uaccess.h>
87 #include <asm/tlb.h>
88 #include <asm/tlbflush.h>
89 
90 #include "pgalloc-track.h"
91 #include "internal.h"
92 #include "swap.h"
93 
94 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
95 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
96 #endif
97 
98 #ifndef CONFIG_NUMA
99 unsigned long max_mapnr;
100 EXPORT_SYMBOL(max_mapnr);
101 
102 struct page *mem_map;
103 EXPORT_SYMBOL(mem_map);
104 #endif
105 
106 static vm_fault_t do_fault(struct vm_fault *vmf);
107 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
108 static bool vmf_pte_changed(struct vm_fault *vmf);
109 
110 /*
111  * Return true if the original pte was a uffd-wp pte marker (so the pte was
112  * wr-protected).
113  */
114 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
115 {
116 	if (!userfaultfd_wp(vmf->vma))
117 		return false;
118 	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
119 		return false;
120 
121 	return pte_marker_uffd_wp(vmf->orig_pte);
122 }
123 
124 /*
125  * A number of key systems in x86 including ioremap() rely on the assumption
126  * that high_memory defines the upper bound on direct map memory, the end
127  * of ZONE_NORMAL.
128  */
129 void *high_memory;
130 EXPORT_SYMBOL(high_memory);
131 
132 /*
133  * Randomize the address space (stacks, mmaps, brk, etc.).
134  *
135  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
136  *   as ancient (libc5 based) binaries can segfault. )
137  */
138 int randomize_va_space __read_mostly =
139 #ifdef CONFIG_COMPAT_BRK
140 					1;
141 #else
142 					2;
143 #endif
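
/*
 * Illustration (not part of this file): a minimal userspace sketch that
 * reads the current randomize_va_space setting via its procfs knob and
 * prints a few addresses; run it twice to observe how levels 1 and 2
 * randomize stack/mmap (and, at 2, brk) placement. Error handling is
 * deliberately minimal.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	char level = '?';
	FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "r");

	if (f) {
		level = fgetc(f);	/* '0', '1' or '2' */
		fclose(f);
	}

	/* An anonymous mapping: its address moves between runs when ASLR is on. */
	void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("randomize_va_space=%c stack=%p mmap=%p brk=%p\n",
	       level, (void *)&f, map, sbrk(0));
	return 0;
}
#endif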
144 
145 #ifndef arch_wants_old_prefaulted_pte
146 static inline bool arch_wants_old_prefaulted_pte(void)
147 {
148 	/*
149 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
150 	 * some architectures, even if it's performed in hardware. By
151 	 * default, "false" means prefaulted entries will be 'young'.
152 	 */
153 	return false;
154 }
155 #endif
156 
157 static int __init disable_randmaps(char *s)
158 {
159 	randomize_va_space = 0;
160 	return 1;
161 }
162 __setup("norandmaps", disable_randmaps);
163 
164 unsigned long zero_pfn __read_mostly;
165 EXPORT_SYMBOL(zero_pfn);
166 
167 unsigned long highest_memmap_pfn __read_mostly;
168 
169 /*
170  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
171  */
172 static int __init init_zero_pfn(void)
173 {
174 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
175 	return 0;
176 }
177 early_initcall(init_zero_pfn);
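
/*
 * Illustration (not part of this file): read faults on untouched private
 * anonymous memory are satisfied by the shared zero page set up above, so
 * the resident set barely grows until the pages are actually written.
 * A userspace sketch with minimal error handling:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static long resident_pages(void)
{
	long size = 0, resident = 0;
	FILE *f = fopen("/proc/self/statm", "r");

	if (f) {
		fscanf(f, "%ld %ld", &size, &resident);
		fclose(f);
	}
	return resident;
}

int main(void)
{
	size_t len = 64 << 20;	/* 64 MiB */
	volatile char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	long sum = 0;

	for (size_t i = 0; i < len; i += 4096)
		sum += p[i];	/* read faults hit the zero page: RSS ~flat */
	printf("after reads:  rss=%ld pages (sum=%ld)\n", resident_pages(), sum);

	memset((void *)p, 1, len);	/* write faults allocate real pages */
	printf("after writes: rss=%ld pages\n", resident_pages());
	return 0;
}
#endif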
178 
179 void mm_trace_rss_stat(struct mm_struct *mm, int member)
180 {
181 	trace_rss_stat(mm, member);
182 }
183 
184 /*
185  * Note: this doesn't free the actual pages themselves. That
186  * has been handled earlier when unmapping all the memory regions.
187  */
188 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
189 			   unsigned long addr)
190 {
191 	pgtable_t token = pmd_pgtable(*pmd);
192 	pmd_clear(pmd);
193 	pte_free_tlb(tlb, token, addr);
194 	mm_dec_nr_ptes(tlb->mm);
195 }
196 
197 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
198 				unsigned long addr, unsigned long end,
199 				unsigned long floor, unsigned long ceiling)
200 {
201 	pmd_t *pmd;
202 	unsigned long next;
203 	unsigned long start;
204 
205 	start = addr;
206 	pmd = pmd_offset(pud, addr);
207 	do {
208 		next = pmd_addr_end(addr, end);
209 		if (pmd_none_or_clear_bad(pmd))
210 			continue;
211 		free_pte_range(tlb, pmd, addr);
212 	} while (pmd++, addr = next, addr != end);
213 
214 	start &= PUD_MASK;
215 	if (start < floor)
216 		return;
217 	if (ceiling) {
218 		ceiling &= PUD_MASK;
219 		if (!ceiling)
220 			return;
221 	}
222 	if (end - 1 > ceiling - 1)
223 		return;
224 
225 	pmd = pmd_offset(pud, start);
226 	pud_clear(pud);
227 	pmd_free_tlb(tlb, pmd, start);
228 	mm_dec_nr_pmds(tlb->mm);
229 }
230 
231 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
232 				unsigned long addr, unsigned long end,
233 				unsigned long floor, unsigned long ceiling)
234 {
235 	pud_t *pud;
236 	unsigned long next;
237 	unsigned long start;
238 
239 	start = addr;
240 	pud = pud_offset(p4d, addr);
241 	do {
242 		next = pud_addr_end(addr, end);
243 		if (pud_none_or_clear_bad(pud))
244 			continue;
245 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
246 	} while (pud++, addr = next, addr != end);
247 
248 	start &= P4D_MASK;
249 	if (start < floor)
250 		return;
251 	if (ceiling) {
252 		ceiling &= P4D_MASK;
253 		if (!ceiling)
254 			return;
255 	}
256 	if (end - 1 > ceiling - 1)
257 		return;
258 
259 	pud = pud_offset(p4d, start);
260 	p4d_clear(p4d);
261 	pud_free_tlb(tlb, pud, start);
262 	mm_dec_nr_puds(tlb->mm);
263 }
264 
265 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
266 				unsigned long addr, unsigned long end,
267 				unsigned long floor, unsigned long ceiling)
268 {
269 	p4d_t *p4d;
270 	unsigned long next;
271 	unsigned long start;
272 
273 	start = addr;
274 	p4d = p4d_offset(pgd, addr);
275 	do {
276 		next = p4d_addr_end(addr, end);
277 		if (p4d_none_or_clear_bad(p4d))
278 			continue;
279 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
280 	} while (p4d++, addr = next, addr != end);
281 
282 	start &= PGDIR_MASK;
283 	if (start < floor)
284 		return;
285 	if (ceiling) {
286 		ceiling &= PGDIR_MASK;
287 		if (!ceiling)
288 			return;
289 	}
290 	if (end - 1 > ceiling - 1)
291 		return;
292 
293 	p4d = p4d_offset(pgd, start);
294 	pgd_clear(pgd);
295 	p4d_free_tlb(tlb, p4d, start);
296 }
297 
298 /*
299  * This function frees user-level page tables of a process.
300  */
301 void free_pgd_range(struct mmu_gather *tlb,
302 			unsigned long addr, unsigned long end,
303 			unsigned long floor, unsigned long ceiling)
304 {
305 	pgd_t *pgd;
306 	unsigned long next;
307 
308 	/*
309 	 * The next few lines have given us lots of grief...
310 	 *
311 	 * Why are we testing PMD* at this top level?  Because often
312 	 * there will be no work to do at all, and we'd prefer not to
313 	 * go all the way down to the bottom just to discover that.
314 	 *
315 	 * Why all these "- 1"s?  Because 0 represents both the bottom
316 	 * of the address space and the top of it (using -1 for the
317 	 * top wouldn't help much: the masks would do the wrong thing).
318 	 * The rule is that addr 0 and floor 0 refer to the bottom of
319  * the address space, but end 0 and ceiling 0 refer to the top.
320 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
321 	 * that end 0 case should be mythical).
322 	 *
323 	 * Wherever addr is brought up or ceiling brought down, we must
324 	 * be careful to reject "the opposite 0" before it confuses the
325 	 * subsequent tests.  But what about where end is brought down
326  * by PMD_SIZE below? No, end can't go down to 0 there.
327 	 *
328  * Whereas we round start (addr) and ceiling down by different
329  * masks at different levels, in order to test whether a table
330  * now has no other vmas using it (and so can be freed), we don't
331  * bother to round floor or end up - the tests don't need that.
332 	 */
333 
334 	addr &= PMD_MASK;
335 	if (addr < floor) {
336 		addr += PMD_SIZE;
337 		if (!addr)
338 			return;
339 	}
340 	if (ceiling) {
341 		ceiling &= PMD_MASK;
342 		if (!ceiling)
343 			return;
344 	}
345 	if (end - 1 > ceiling - 1)
346 		end -= PMD_SIZE;
347 	if (addr > end - 1)
348 		return;
349 	/*
350 	 * Page table cache pages are added with PAGE_SIZE granularity
351 	 * (see pte_free_tlb()), so flush the TLB at that page size if needed.
352 	 */
353 	tlb_change_page_size(tlb, PAGE_SIZE);
354 	pgd = pgd_offset(tlb->mm, addr);
355 	do {
356 		next = pgd_addr_end(addr, end);
357 		if (pgd_none_or_clear_bad(pgd))
358 			continue;
359 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
360 	} while (pgd++, addr = next, addr != end);
361 }
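
/*
 * Worked example of the "- 1" convention explained in free_pgd_range()
 * above (illustration only, not kernel code): with end/ceiling of 0
 * standing for the very top of the address space, the unsigned wrap of
 * "ceiling - 1" to ULONG_MAX lets one comparison cover both the ordinary
 * and the wrap-around cases.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long end = 0x7000UL;

	/* Ordinary ceiling: end reaches past it, so trimming is needed. */
	assert(end - 1 > 0x6000UL - 1);

	/* ceiling == 0 means "top": ceiling - 1 wraps to ULONG_MAX, never exceeded. */
	unsigned long ceiling = 0;
	assert(!(end - 1 > ceiling - 1));
	return 0;
}
#endif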
362 
363 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
364 		   struct vm_area_struct *vma, unsigned long floor,
365 		   unsigned long ceiling, bool mm_wr_locked)
366 {
367 	struct unlink_vma_file_batch vb;
368 
369 	do {
370 		unsigned long addr = vma->vm_start;
371 		struct vm_area_struct *next;
372 
373 		/*
374 		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
375 		 * be 0.  This will underflow and is okay.
376 		 */
377 		next = mas_find(mas, ceiling - 1);
378 		if (unlikely(xa_is_zero(next)))
379 			next = NULL;
380 
381 		/*
382 		 * Hide vma from rmap and truncate_pagecache before freeing
383 		 * pgtables
384 		 */
385 		if (mm_wr_locked)
386 			vma_start_write(vma);
387 		unlink_anon_vmas(vma);
388 
389 		if (is_vm_hugetlb_page(vma)) {
390 			unlink_file_vma(vma);
391 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
392 				floor, next ? next->vm_start : ceiling);
393 		} else {
394 			unlink_file_vma_batch_init(&vb);
395 			unlink_file_vma_batch_add(&vb, vma);
396 
397 			/*
398 			 * Optimization: gather nearby vmas into one call down
399 			 */
400 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
401 			       && !is_vm_hugetlb_page(next)) {
402 				vma = next;
403 				next = mas_find(mas, ceiling - 1);
404 				if (unlikely(xa_is_zero(next)))
405 					next = NULL;
406 				if (mm_wr_locked)
407 					vma_start_write(vma);
408 				unlink_anon_vmas(vma);
409 				unlink_file_vma_batch_add(&vb, vma);
410 			}
411 			unlink_file_vma_batch_final(&vb);
412 			free_pgd_range(tlb, addr, vma->vm_end,
413 				floor, next ? next->vm_start : ceiling);
414 		}
415 		vma = next;
416 	} while (vma);
417 }
418 
419 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
420 {
421 	spinlock_t *ptl = pmd_lock(mm, pmd);
422 
423 	if (likely(pmd_none(*pmd))) {	/* Has another populated it? */
424 		mm_inc_nr_ptes(mm);
425 		/*
426 		 * Ensure all pte setup (eg. pte page lock and page clearing) is
427 		 * visible before the pte is made visible to other CPUs by being
428 		 * put into page tables.
429 		 *
430 		 * The other side of the story is the pointer chasing in the page
431 		 * table walking code (when walking the page table without locking;
432 		 * ie. most of the time). Fortunately, these data accesses consist
433 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
434 		 * being the notable exception) will already guarantee loads are
435 		 * seen in-order. See the alpha page table accessors for the
436 		 * smp_rmb() barriers in page table walking code.
437 		 */
438 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
439 		pmd_populate(mm, pmd, *pte);
440 		*pte = NULL;
441 	}
442 	spin_unlock(ptl);
443 }
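
/*
 * The ordering pattern used by pmd_install() in miniature (illustration
 * only): fully initialize an object, then publish the pointer with
 * release semantics, so a lockless reader that loads the pointer and
 * chases it sees initialized contents. smp_wmb() plus dependent loads
 * are the kernel's flavor of this; below is a hedged C11-atomics
 * userspace analogue.
 */
#if 0
#include <stdatomic.h>
#include <stdlib.h>

struct table { int entries[512]; };

static _Atomic(struct table *) published;

static void publisher(void)
{
	struct table *t = calloc(1, sizeof(*t));	/* "page clearing" */

	t->entries[0] = 42;				/* "pte setup" */
	/* release = write barrier before making the pointer visible */
	atomic_store_explicit(&published, t, memory_order_release);
}

static int reader(void)
{
	/* acquire pairs with the release store above */
	struct table *t = atomic_load_explicit(&published, memory_order_acquire);

	return t ? t->entries[0] : -1;	/* never sees uninitialized data */
}
#endif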
444 
445 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
446 {
447 	pgtable_t new = pte_alloc_one(mm);
448 	if (!new)
449 		return -ENOMEM;
450 
451 	pmd_install(mm, pmd, &new);
452 	if (new)
453 		pte_free(mm, new);
454 	return 0;
455 }
456 
457 int __pte_alloc_kernel(pmd_t *pmd)
458 {
459 	pte_t *new = pte_alloc_one_kernel(&init_mm);
460 	if (!new)
461 		return -ENOMEM;
462 
463 	spin_lock(&init_mm.page_table_lock);
464 	if (likely(pmd_none(*pmd))) {	/* Has another populated it? */
465 		smp_wmb(); /* See comment in pmd_install() */
466 		pmd_populate_kernel(&init_mm, pmd, new);
467 		new = NULL;
468 	}
469 	spin_unlock(&init_mm.page_table_lock);
470 	if (new)
471 		pte_free_kernel(&init_mm, new);
472 	return 0;
473 }
474 
475 static inline void init_rss_vec(int *rss)
476 {
477 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
478 }
479 
480 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
481 {
482 	int i;
483 
484 	for (i = 0; i < NR_MM_COUNTERS; i++)
485 		if (rss[i])
486 			add_mm_counter(mm, i, rss[i]);
487 }
488 
489 /*
490  * This function is called to print an error when a bad pte
491  * is found. For example, we might have a PFN-mapped pte in
492  * a region that doesn't allow it.
493  *
494  * The calling function must still handle the error.
495  */
496 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
497 			  pte_t pte, struct page *page)
498 {
499 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
500 	p4d_t *p4d = p4d_offset(pgd, addr);
501 	pud_t *pud = pud_offset(p4d, addr);
502 	pmd_t *pmd = pmd_offset(pud, addr);
503 	struct address_space *mapping;
504 	pgoff_t index;
505 	static unsigned long resume;
506 	static unsigned long nr_shown;
507 	static unsigned long nr_unshown;
508 
509 	/*
510 	 * Allow a burst of 60 reports, then keep quiet for that minute;
511 	 * or allow a steady drip of one report per second.
512 	 */
513 	if (nr_shown == 60) {
514 		if (time_before(jiffies, resume)) {
515 			nr_unshown++;
516 			return;
517 		}
518 		if (nr_unshown) {
519 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
520 				 nr_unshown);
521 			nr_unshown = 0;
522 		}
523 		nr_shown = 0;
524 	}
525 	if (nr_shown++ == 0)
526 		resume = jiffies + 60 * HZ;
527 
528 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
529 	index = linear_page_index(vma, addr);
530 
531 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
532 		 current->comm,
533 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
534 	if (page)
535 		dump_page(page, "bad pte");
536 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
537 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
538 	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
539 		 vma->vm_file,
540 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
541 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
542 		 mapping ? mapping->a_ops->read_folio : NULL);
543 	dump_stack();
544 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
545 }
546 
547 /*
548  * vm_normal_page -- This function gets the "struct page" associated with a pte.
549  *
550  * "Special" mappings do not wish to be associated with a "struct page" (either
551  * it doesn't exist, or it exists but they don't want to touch it). In this
552  * case, NULL is returned here. "Normal" mappings do have a struct page.
553  *
554  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
555  * pte bit, in which case this function is trivial. Secondly, an architecture
556  * may not have a spare pte bit, which requires a more complicated scheme,
557  * described below.
558  *
559  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
560  * special mapping (even if there are underlying and valid "struct pages").
561  * COWed pages of a VM_PFNMAP are always normal.
562  *
563  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
564  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
565  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
566  * mapping will always honor the rule
567  *
568  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
569  *
570  * And for normal mappings this is false.
571  *
572  * This restricts such mappings to be a linear translation from virtual address
573  * to pfn. To get around this restriction, we allow arbitrary mappings so long
574  * as the vma is not a COW mapping; in that case, we know that all ptes are
575  * special (because none can have been COWed).
576  *
577  *
578  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
579  *
580  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
581  * page" backing, however the difference is that _all_ pages with a struct
582  * page (that is, those where pfn_valid is true) are refcounted and considered
583  * normal pages by the VM. The only exceptions are zeropages, which are
584  * *never* refcounted.
585  *
586  * The disadvantage is that pages are refcounted (which can be slower and
587  * simply not an option for some PFNMAP users). The advantage is that we
588  * don't have to follow the strict linearity rule of PFNMAP mappings in
589  * order to support COWable mappings.
590  *
591  */
592 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
593 			    pte_t pte)
594 {
595 	unsigned long pfn = pte_pfn(pte);
596 
597 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
598 		if (likely(!pte_special(pte)))
599 			goto check_pfn;
600 		if (vma->vm_ops && vma->vm_ops->find_special_page)
601 			return vma->vm_ops->find_special_page(vma, addr);
602 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
603 			return NULL;
604 		if (is_zero_pfn(pfn))
605 			return NULL;
606 		/*
607 		 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
608 		 * and will have refcounts incremented on their struct pages
609 		 * when they are inserted into PTEs, thus they are safe to
610 		 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
611 		 * do not have refcounts. An example of legacy ZONE_DEVICE is
612 		 * the MEMORY_DEVICE_FS_DAX type in the pmem or virtio_fs drivers.
613 		 */
614 		if (pte_devmap(pte))
615 			return NULL;
616 
617 		print_bad_pte(vma, addr, pte, NULL);
618 		return NULL;
619 	}
620 
621 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
622 
623 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
624 		if (vma->vm_flags & VM_MIXEDMAP) {
625 			if (!pfn_valid(pfn))
626 				return NULL;
627 			if (is_zero_pfn(pfn))
628 				return NULL;
629 			goto out;
630 		} else {
631 			unsigned long off;
632 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
633 			if (pfn == vma->vm_pgoff + off)
634 				return NULL;
635 			if (!is_cow_mapping(vma->vm_flags))
636 				return NULL;
637 		}
638 	}
639 
640 	if (is_zero_pfn(pfn))
641 		return NULL;
642 
643 check_pfn:
644 	if (unlikely(pfn > highest_memmap_pfn)) {
645 		print_bad_pte(vma, addr, pte, NULL);
646 		return NULL;
647 	}
648 
649 	/*
650 	 * NOTE! We still have PageReserved() pages in the page tables.
651 	 * eg. VDSO mappings can cause them to exist.
652 	 */
653 out:
654 	VM_WARN_ON_ONCE(is_zero_pfn(pfn));
655 	return pfn_to_page(pfn);
656 }
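
/*
 * The VM_PFNMAP linearity rule above, restated as a tiny predicate
 * (illustration only; it simply mirrors the VM_PFNMAP branch of
 * vm_normal_page()): a pfn that still sits at its linear offset from
 * vm_pgoff was mapped by remap_pfn_range() and is "special"; one that
 * has moved was COWed and is a normal page.
 */
#if 0
static bool pfnmap_pfn_is_linear(unsigned long pfn, unsigned long addr,
				 unsigned long vm_start, unsigned long vm_pgoff)
{
	unsigned long off = (addr - vm_start) >> PAGE_SHIFT;

	return pfn == vm_pgoff + off;	/* true => raw PFN map, no struct page use */
}
#endif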
657 
658 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
659 			    pte_t pte)
660 {
661 	struct page *page = vm_normal_page(vma, addr, pte);
662 
663 	if (page)
664 		return page_folio(page);
665 	return NULL;
666 }
667 
668 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
669 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
670 				pmd_t pmd)
671 {
672 	unsigned long pfn = pmd_pfn(pmd);
673 
674 	/* Currently it's only used for huge pfnmaps */
675 	if (unlikely(pmd_special(pmd)))
676 		return NULL;
677 
678 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
679 		if (vma->vm_flags & VM_MIXEDMAP) {
680 			if (!pfn_valid(pfn))
681 				return NULL;
682 			goto out;
683 		} else {
684 			unsigned long off;
685 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
686 			if (pfn == vma->vm_pgoff + off)
687 				return NULL;
688 			if (!is_cow_mapping(vma->vm_flags))
689 				return NULL;
690 		}
691 	}
692 
693 	if (pmd_devmap(pmd))
694 		return NULL;
695 	if (is_huge_zero_pmd(pmd))
696 		return NULL;
697 	if (unlikely(pfn > highest_memmap_pfn))
698 		return NULL;
699 
700 	/*
701 	 * NOTE! We still have PageReserved() pages in the page tables.
702 	 * eg. VDSO mappings can cause them to exist.
703 	 */
704 out:
705 	return pfn_to_page(pfn);
706 }
707 
708 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
709 				  unsigned long addr, pmd_t pmd)
710 {
711 	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
712 
713 	if (page)
714 		return page_folio(page);
715 	return NULL;
716 }
717 #endif
718 
719 static void restore_exclusive_pte(struct vm_area_struct *vma,
720 				  struct page *page, unsigned long address,
721 				  pte_t *ptep)
722 {
723 	struct folio *folio = page_folio(page);
724 	pte_t orig_pte;
725 	pte_t pte;
726 	swp_entry_t entry;
727 
728 	orig_pte = ptep_get(ptep);
729 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
730 	if (pte_swp_soft_dirty(orig_pte))
731 		pte = pte_mksoft_dirty(pte);
732 
733 	entry = pte_to_swp_entry(orig_pte);
734 	if (pte_swp_uffd_wp(orig_pte))
735 		pte = pte_mkuffd_wp(pte);
736 	else if (is_writable_device_exclusive_entry(entry))
737 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
738 
739 	VM_BUG_ON_FOLIO(pte_write(pte) && !(folio_test_anon(folio) &&
740 					   PageAnonExclusive(page)), folio);
741 
742 	/*
743 	 * No need to take a page reference as one was already
744 	 * created when the swap entry was made.
745 	 */
746 	if (folio_test_anon(folio))
747 		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
748 	else
749 		/*
750 		 * Currently device exclusive access only supports anonymous
751 		 * memory so the entry shouldn't point to a filebacked page.
752 		 */
753 		WARN_ON_ONCE(1);
754 
755 	set_pte_at(vma->vm_mm, address, ptep, pte);
756 
757 	/*
758 	 * No need to invalidate - it was non-present before. However
759 	 * secondary CPUs may have mappings that need invalidating.
760 	 */
761 	update_mmu_cache(vma, address, ptep);
762 }
763 
764 /*
765  * Tries to restore an exclusive pte if the page lock can be acquired without
766  * sleeping.
767  */
768 static int
769 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
770 			unsigned long addr)
771 {
772 	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
773 	struct page *page = pfn_swap_entry_to_page(entry);
774 
775 	if (trylock_page(page)) {
776 		restore_exclusive_pte(vma, page, addr, src_pte);
777 		unlock_page(page);
778 		return 0;
779 	}
780 
781 	return -EBUSY;
782 }
783 
784 /*
785  * Copy one vm_area from one task to the other. Assumes the page tables
786  * already present in the new task have been cleared in the whole range
787  * covered by this vma.
788  */
789 
790 static unsigned long
791 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
792 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
793 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
794 {
795 	unsigned long vm_flags = dst_vma->vm_flags;
796 	pte_t orig_pte = ptep_get(src_pte);
797 	pte_t pte = orig_pte;
798 	struct folio *folio;
799 	struct page *page;
800 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
801 
802 	if (likely(!non_swap_entry(entry))) {
803 		if (swap_duplicate(entry) < 0)
804 			return -EIO;
805 
806 		/* make sure dst_mm is on swapoff's mmlist. */
807 		if (unlikely(list_empty(&dst_mm->mmlist))) {
808 			spin_lock(&mmlist_lock);
809 			if (list_empty(&dst_mm->mmlist))
810 				list_add(&dst_mm->mmlist,
811 						&src_mm->mmlist);
812 			spin_unlock(&mmlist_lock);
813 		}
814 		/* Mark the swap entry as shared. */
815 		if (pte_swp_exclusive(orig_pte)) {
816 			pte = pte_swp_clear_exclusive(orig_pte);
817 			set_pte_at(src_mm, addr, src_pte, pte);
818 		}
819 		rss[MM_SWAPENTS]++;
820 	} else if (is_migration_entry(entry)) {
821 		folio = pfn_swap_entry_folio(entry);
822 
823 		rss[mm_counter(folio)]++;
824 
825 		if (!is_readable_migration_entry(entry) &&
826 				is_cow_mapping(vm_flags)) {
827 			/*
828 			 * COW mappings require pages in both parent and child
829 			 * to be set to read-only. A previously exclusive entry is
830 			 * now shared.
831 			 */
832 			entry = make_readable_migration_entry(
833 							swp_offset(entry));
834 			pte = swp_entry_to_pte(entry);
835 			if (pte_swp_soft_dirty(orig_pte))
836 				pte = pte_swp_mksoft_dirty(pte);
837 			if (pte_swp_uffd_wp(orig_pte))
838 				pte = pte_swp_mkuffd_wp(pte);
839 			set_pte_at(src_mm, addr, src_pte, pte);
840 		}
841 	} else if (is_device_private_entry(entry)) {
842 		page = pfn_swap_entry_to_page(entry);
843 		folio = page_folio(page);
844 
845 		/*
846 		 * Update rss count even for unaddressable pages, as
847 		 * they should be treated just like normal pages in this
848 		 * respect.
849 		 *
850 		 * We will likely want to have some new rss counters
851 		 * for unaddressable pages, at some point. But for now
852 		 * keep things as they are.
853 		 */
854 		folio_get(folio);
855 		rss[mm_counter(folio)]++;
856 		/* Cannot fail as these pages cannot get pinned. */
857 		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
858 
859 		/*
860 		 * We do not preserve soft-dirty information, because so
861 		 * far, checkpoint/restore is the only feature that
862 		 * requires that. And checkpoint/restore does not work
863 		 * when a device driver is involved (you cannot easily
864 		 * save and restore device driver state).
865 		 */
866 		if (is_writable_device_private_entry(entry) &&
867 		    is_cow_mapping(vm_flags)) {
868 			entry = make_readable_device_private_entry(
869 							swp_offset(entry));
870 			pte = swp_entry_to_pte(entry);
871 			if (pte_swp_uffd_wp(orig_pte))
872 				pte = pte_swp_mkuffd_wp(pte);
873 			set_pte_at(src_mm, addr, src_pte, pte);
874 		}
875 	} else if (is_device_exclusive_entry(entry)) {
876 		/*
877 		 * Make device exclusive entries present by restoring the
878 		 * original entry then copying as for a present pte. Device
879 		 * exclusive entries currently only support private writable
880 		 * (ie. COW) mappings.
881 		 */
882 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
883 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
884 			return -EBUSY;
885 		return -ENOENT;
886 	} else if (is_pte_marker_entry(entry)) {
887 		pte_marker marker = copy_pte_marker(entry, dst_vma);
888 
889 		if (marker)
890 			set_pte_at(dst_mm, addr, dst_pte,
891 				   make_pte_marker(marker));
892 		return 0;
893 	}
894 	if (!userfaultfd_wp(dst_vma))
895 		pte = pte_swp_clear_uffd_wp(pte);
896 	set_pte_at(dst_mm, addr, dst_pte, pte);
897 	return 0;
898 }
899 
900 /*
901  * Copy a present and normal page.
902  *
903  * NOTE! The usual case is that this isn't required;
904  * instead, the caller can just increase the page refcount
905  * and re-use the pte the traditional way.
906  *
907  * And if we need a pre-allocated page but don't yet have
908  * one, return a negative error to let the preallocation
909  * code know so that it can do so outside the page table
910  * lock.
911  */
912 static inline int
913 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
914 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
915 		  struct folio **prealloc, struct page *page)
916 {
917 	struct folio *new_folio;
918 	pte_t pte;
919 
920 	new_folio = *prealloc;
921 	if (!new_folio)
922 		return -EAGAIN;
923 
924 	/*
925 	 * We have a prealloc page, all good!  Take it
926 	 * over and copy the page & arm it.
927 	 */
928 
929 	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
930 		return -EHWPOISON;
931 
932 	*prealloc = NULL;
933 	__folio_mark_uptodate(new_folio);
934 	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
935 	folio_add_lru_vma(new_folio, dst_vma);
936 	rss[MM_ANONPAGES]++;
937 
938 	/* All done, just insert the new page copy in the child */
939 	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
940 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
941 	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
942 		/* Uffd-wp needs to be delivered to dest pte as well */
943 		pte = pte_mkuffd_wp(pte);
944 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
945 	return 0;
946 }
947 
948 static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
949 		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
950 		pte_t pte, unsigned long addr, int nr)
951 {
952 	struct mm_struct *src_mm = src_vma->vm_mm;
953 
954 	/* If it's a COW mapping, write-protect it in both processes. */
955 	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
956 		wrprotect_ptes(src_mm, addr, src_pte, nr);
957 		pte = pte_wrprotect(pte);
958 	}
959 
960 	/* If it's a shared mapping, mark it clean in the child. */
961 	if (src_vma->vm_flags & VM_SHARED)
962 		pte = pte_mkclean(pte);
963 	pte = pte_mkold(pte);
964 
965 	if (!userfaultfd_wp(dst_vma))
966 		pte = pte_clear_uffd_wp(pte);
967 
968 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
969 }
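
/*
 * What the write protection in __copy_present_ptes() means for userspace
 * (illustration only): after fork(), parent and child share the page
 * read-only; the first write from either side takes a COW fault and gets
 * a private copy, so the other side's view is unchanged.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	strcpy(p, "parent data");
	if (fork() == 0) {
		strcpy(p, "child data");	/* COW fault: child gets a copy */
		_exit(0);
	}
	wait(NULL);
	printf("%s\n", p);	/* still "parent data" */
	return 0;
}
#endif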
970 
971 /*
972  * Copy one present PTE, trying to batch-process subsequent PTEs that map
973  * consecutive pages of the same folio by copying them as well.
974  *
975  * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
976  * Otherwise, returns the number of copied PTEs (at least 1).
977  */
978 static inline int
979 copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
980 		 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
981 		 int max_nr, int *rss, struct folio **prealloc)
982 {
983 	struct page *page;
984 	struct folio *folio;
985 	bool any_writable;
986 	fpb_t flags = 0;
987 	int err, nr;
988 
989 	page = vm_normal_page(src_vma, addr, pte);
990 	if (unlikely(!page))
991 		goto copy_pte;
992 
993 	folio = page_folio(page);
994 
995 	/*
996 	 * If we likely have to copy, just don't bother with batching. Make
997 	 * sure that the common "small folio" case is as fast as possible
998 	 * by keeping the batching logic separate.
999 	 */
1000 	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
1001 		if (src_vma->vm_flags & VM_SHARED)
1002 			flags |= FPB_IGNORE_DIRTY;
1003 		if (!vma_soft_dirty_enabled(src_vma))
1004 			flags |= FPB_IGNORE_SOFT_DIRTY;
1005 
1006 		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
1007 				     &any_writable, NULL, NULL);
1008 		folio_ref_add(folio, nr);
1009 		if (folio_test_anon(folio)) {
1010 			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
1011 								  nr, src_vma))) {
1012 				folio_ref_sub(folio, nr);
1013 				return -EAGAIN;
1014 			}
1015 			rss[MM_ANONPAGES] += nr;
1016 			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1017 		} else {
1018 			folio_dup_file_rmap_ptes(folio, page, nr);
1019 			rss[mm_counter_file(folio)] += nr;
1020 		}
1021 		if (any_writable)
1022 			pte = pte_mkwrite(pte, src_vma);
1023 		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
1024 				    addr, nr);
1025 		return nr;
1026 	}
1027 
1028 	folio_get(folio);
1029 	if (folio_test_anon(folio)) {
1030 		/*
1031 		 * If this page may have been pinned by the parent process,
1032 		 * copy the page immediately for the child so that we'll always
1033 		 * guarantee the pinned page won't be randomly replaced in the
1034 		 * future.
1035 		 */
1036 		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
1037 			/* Page may be pinned, we have to copy. */
1038 			folio_put(folio);
1039 			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
1040 						addr, rss, prealloc, page);
1041 			return err ? err : 1;
1042 		}
1043 		rss[MM_ANONPAGES]++;
1044 		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1045 	} else {
1046 		folio_dup_file_rmap_pte(folio, page);
1047 		rss[mm_counter_file(folio)]++;
1048 	}
1049 
1050 copy_pte:
1051 	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
1052 	return 1;
1053 }
1054 
1055 static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
1056 		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
1057 {
1058 	struct folio *new_folio;
1059 
1060 	if (need_zero)
1061 		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
1062 	else
1063 		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
1064 
1065 	if (!new_folio)
1066 		return NULL;
1067 
1068 	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
1069 		folio_put(new_folio);
1070 		return NULL;
1071 	}
1072 	folio_throttle_swaprate(new_folio, GFP_KERNEL);
1073 
1074 	return new_folio;
1075 }
1076 
1077 static int
1078 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1079 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1080 	       unsigned long end)
1081 {
1082 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1083 	struct mm_struct *src_mm = src_vma->vm_mm;
1084 	pte_t *orig_src_pte, *orig_dst_pte;
1085 	pte_t *src_pte, *dst_pte;
1086 	pmd_t dummy_pmdval;
1087 	pte_t ptent;
1088 	spinlock_t *src_ptl, *dst_ptl;
1089 	int progress, max_nr, ret = 0;
1090 	int rss[NR_MM_COUNTERS];
1091 	swp_entry_t entry = (swp_entry_t){0};
1092 	struct folio *prealloc = NULL;
1093 	int nr;
1094 
1095 again:
1096 	progress = 0;
1097 	init_rss_vec(rss);
1098 
1099 	/*
1100 	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
1101 	 * error handling here, assume that exclusive mmap_lock on dst and src
1102 	 * protects anon from unexpected THP transitions; with shmem and file
1103 	 * protected by mmap_lock-less collapse skipping areas with anon_vma
1104 	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
1105 	 * can remove such assumptions later, but this is good enough for now.
1106 	 */
1107 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1108 	if (!dst_pte) {
1109 		ret = -ENOMEM;
1110 		goto out;
1111 	}
1112 
1113 	/*
1114 	 * We already hold the exclusive mmap_lock, and copy_pte_range() and
1115 	 * retract_page_tables() use vma->anon_vma for exclusion, so the
1116 	 * PTE page is stable, and there is no need to get pmdval and do a
1117 	 * pmd_same() check.
1118 	 */
1119 	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
1120 					   &src_ptl);
1121 	if (!src_pte) {
1122 		pte_unmap_unlock(dst_pte, dst_ptl);
1123 		/* ret == 0 */
1124 		goto out;
1125 	}
1126 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1127 	orig_src_pte = src_pte;
1128 	orig_dst_pte = dst_pte;
1129 	arch_enter_lazy_mmu_mode();
1130 
1131 	do {
1132 		nr = 1;
1133 
1134 		/*
1135 		 * We are holding two locks at this point - either of them
1136 		 * could generate latencies in another task on another CPU.
1137 		 */
1138 		if (progress >= 32) {
1139 			progress = 0;
1140 			if (need_resched() ||
1141 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1142 				break;
1143 		}
1144 		ptent = ptep_get(src_pte);
1145 		if (pte_none(ptent)) {
1146 			progress++;
1147 			continue;
1148 		}
1149 		if (unlikely(!pte_present(ptent))) {
1150 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1151 						  dst_pte, src_pte,
1152 						  dst_vma, src_vma,
1153 						  addr, rss);
1154 			if (ret == -EIO) {
1155 				entry = pte_to_swp_entry(ptep_get(src_pte));
1156 				break;
1157 			} else if (ret == -EBUSY) {
1158 				break;
1159 			} else if (!ret) {
1160 				progress += 8;
1161 				continue;
1162 			}
1163 			ptent = ptep_get(src_pte);
1164 			VM_WARN_ON_ONCE(!pte_present(ptent));
1165 
1166 			/*
1167 			 * Device exclusive entry restored, continue by copying
1168 			 * the now present pte.
1169 			 */
1170 			WARN_ON_ONCE(ret != -ENOENT);
1171 		}
1172 		/* copy_present_ptes() will clear `*prealloc' if consumed */
1173 		max_nr = (end - addr) / PAGE_SIZE;
1174 		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
1175 					ptent, addr, max_nr, rss, &prealloc);
1176 		/*
1177 		 * If we need a pre-allocated page for this pte, drop the
1178 		 * locks, allocate, and try again.
1179 		 * If copy failed due to hwpoison in source page, break out.
1180 		 */
1181 		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
1182 			break;
1183 		if (unlikely(prealloc)) {
1184 			/*
1185 			 * The preallocated page cannot be reused for the next
1186 			 * pte, so that mempolicy is strictly followed (e.g.,
1187 			 * alloc_page_vma() allocates according to address).
1188 			 * This can only happen if a pinned pte changed.
1189 			 */
1190 			folio_put(prealloc);
1191 			prealloc = NULL;
1192 		}
1193 		nr = ret;
1194 		progress += 8 * nr;
1195 	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
1196 		 addr != end);
1197 
1198 	arch_leave_lazy_mmu_mode();
1199 	pte_unmap_unlock(orig_src_pte, src_ptl);
1200 	add_mm_rss_vec(dst_mm, rss);
1201 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1202 	cond_resched();
1203 
1204 	if (ret == -EIO) {
1205 		VM_WARN_ON_ONCE(!entry.val);
1206 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1207 			ret = -ENOMEM;
1208 			goto out;
1209 		}
1210 		entry.val = 0;
1211 	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
1212 		goto out;
1213 	} else if (ret == -EAGAIN) {
1214 		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
1215 		if (!prealloc)
1216 			return -ENOMEM;
1217 	} else if (ret < 0) {
1218 		VM_WARN_ON_ONCE(1);
1219 	}
1220 
1221 	/* We've captured and resolved the error. Reset, try again. */
1222 	ret = 0;
1223 
1224 	if (addr != end)
1225 		goto again;
1226 out:
1227 	if (unlikely(prealloc))
1228 		folio_put(prealloc);
1229 	return ret;
1230 }
1231 
1232 static inline int
1233 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1234 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1235 	       unsigned long end)
1236 {
1237 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1238 	struct mm_struct *src_mm = src_vma->vm_mm;
1239 	pmd_t *src_pmd, *dst_pmd;
1240 	unsigned long next;
1241 
1242 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1243 	if (!dst_pmd)
1244 		return -ENOMEM;
1245 	src_pmd = pmd_offset(src_pud, addr);
1246 	do {
1247 		next = pmd_addr_end(addr, end);
1248 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1249 			|| pmd_devmap(*src_pmd)) {
1250 			int err;
1251 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1252 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1253 					    addr, dst_vma, src_vma);
1254 			if (err == -ENOMEM)
1255 				return -ENOMEM;
1256 			if (!err)
1257 				continue;
1258 			/* fall through */
1259 		}
1260 		if (pmd_none_or_clear_bad(src_pmd))
1261 			continue;
1262 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1263 				   addr, next))
1264 			return -ENOMEM;
1265 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1266 	return 0;
1267 }
1268 
1269 static inline int
1270 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1271 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1272 	       unsigned long end)
1273 {
1274 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1275 	struct mm_struct *src_mm = src_vma->vm_mm;
1276 	pud_t *src_pud, *dst_pud;
1277 	unsigned long next;
1278 
1279 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1280 	if (!dst_pud)
1281 		return -ENOMEM;
1282 	src_pud = pud_offset(src_p4d, addr);
1283 	do {
1284 		next = pud_addr_end(addr, end);
1285 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1286 			int err;
1287 
1288 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1289 			err = copy_huge_pud(dst_mm, src_mm,
1290 					    dst_pud, src_pud, addr, src_vma);
1291 			if (err == -ENOMEM)
1292 				return -ENOMEM;
1293 			if (!err)
1294 				continue;
1295 			/* fall through */
1296 		}
1297 		if (pud_none_or_clear_bad(src_pud))
1298 			continue;
1299 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1300 				   addr, next))
1301 			return -ENOMEM;
1302 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1303 	return 0;
1304 }
1305 
1306 static inline int
1307 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1308 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1309 	       unsigned long end)
1310 {
1311 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1312 	p4d_t *src_p4d, *dst_p4d;
1313 	unsigned long next;
1314 
1315 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1316 	if (!dst_p4d)
1317 		return -ENOMEM;
1318 	src_p4d = p4d_offset(src_pgd, addr);
1319 	do {
1320 		next = p4d_addr_end(addr, end);
1321 		if (p4d_none_or_clear_bad(src_p4d))
1322 			continue;
1323 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1324 				   addr, next))
1325 			return -ENOMEM;
1326 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1327 	return 0;
1328 }
1329 
1330 /*
1331  * Return true if the vma needs to copy the pgtable during this fork().  Return
1332  * false when we can speed up fork() by allowing lazy page faults later,
1333  * when the child accesses the memory range.
1334  */
1335 static bool
1336 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1337 {
1338 	/*
1339 	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
1340 	 * file-backed (e.g. shmem): when uffd-wp is enabled, the pgtable
1341 	 * contains uffd-wp protection information that we can't retrieve
1342 	 * from the page cache, and skipping the copy would lose it.
1343 	 */
1344 	if (userfaultfd_wp(dst_vma))
1345 		return true;
1346 
1347 	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
1348 		return true;
1349 
1350 	if (src_vma->anon_vma)
1351 		return true;
1352 
1353 	/*
1354 	 * Don't copy ptes where a page fault will fill them correctly.  Fork
1355 	 * becomes much lighter when there are big shared or private readonly
1356 	 * mappings. The tradeoff is that copy_page_range is more efficient
1357 	 * than faulting.
1358 	 */
1359 	return false;
1360 }
1361 
1362 int
1363 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1364 {
1365 	pgd_t *src_pgd, *dst_pgd;
1366 	unsigned long next;
1367 	unsigned long addr = src_vma->vm_start;
1368 	unsigned long end = src_vma->vm_end;
1369 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1370 	struct mm_struct *src_mm = src_vma->vm_mm;
1371 	struct mmu_notifier_range range;
1372 	bool is_cow;
1373 	int ret;
1374 
1375 	if (!vma_needs_copy(dst_vma, src_vma))
1376 		return 0;
1377 
1378 	if (is_vm_hugetlb_page(src_vma))
1379 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1380 
1381 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1382 		/*
1383 		 * We do not free on error cases below as remove_vma
1384 		 * gets called on error from a higher-level routine.
1385 		 */
1386 		ret = track_pfn_copy(src_vma);
1387 		if (ret)
1388 			return ret;
1389 	}
1390 
1391 	/*
1392 	 * We need to invalidate the secondary MMU mappings only when
1393 	 * there could be a permission downgrade on the ptes of the
1394 	 * parent mm. And a permission downgrade will only happen if
1395 	 * is_cow_mapping() returns true.
1396 	 */
1397 	is_cow = is_cow_mapping(src_vma->vm_flags);
1398 
1399 	if (is_cow) {
1400 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1401 					0, src_mm, addr, end);
1402 		mmu_notifier_invalidate_range_start(&range);
1403 		/*
1404 		 * Disabling preemption is not needed for the write side, as
1405 		 * the read side doesn't spin, but goes to the mmap_lock.
1406 		 *
1407 		 * Use the raw variant of the seqcount_t write API to avoid
1408 		 * lockdep complaining about preemptibility.
1409 		 */
1410 		vma_assert_write_locked(src_vma);
1411 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1412 	}
1413 
1414 	ret = 0;
1415 	dst_pgd = pgd_offset(dst_mm, addr);
1416 	src_pgd = pgd_offset(src_mm, addr);
1417 	do {
1418 		next = pgd_addr_end(addr, end);
1419 		if (pgd_none_or_clear_bad(src_pgd))
1420 			continue;
1421 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1422 					    addr, next))) {
1423 			untrack_pfn_clear(dst_vma);
1424 			ret = -ENOMEM;
1425 			break;
1426 		}
1427 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1428 
1429 	if (is_cow) {
1430 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1431 		mmu_notifier_invalidate_range_end(&range);
1432 	}
1433 	return ret;
1434 }
1435 
1436 /* Whether we should zap all COWed (private) pages too */
1437 static inline bool should_zap_cows(struct zap_details *details)
1438 {
1439 	/* By default, zap all pages */
1440 	if (!details || details->reclaim_pt)
1441 		return true;
1442 
1443 	/* Or, we zap COWed pages only if the caller wants to */
1444 	return details->even_cows;
1445 }
1446 
1447 /* Decides whether we should zap this folio with the folio pointer specified */
1448 static inline bool should_zap_folio(struct zap_details *details,
1449 				    struct folio *folio)
1450 {
1451 	/* If we can make a decision without *folio.. */
1452 	if (should_zap_cows(details))
1453 		return true;
1454 
1455 	/* Otherwise we should only zap non-anon folios */
1456 	return !folio_test_anon(folio);
1457 }
1458 
1459 static inline bool zap_drop_markers(struct zap_details *details)
1460 {
1461 	if (!details)
1462 		return false;
1463 
1464 	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1465 }
1466 
1467 /*
1468  * This function makes sure that we'll replace the none pte with an uffd-wp
1469  * swap special pte marker when necessary. Must be called with the pgtable lock held.
1470  *
1471  * Returns true if any uffd-wp pte was installed, false otherwise.
1472  */
1473 static inline bool
1474 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1475 			      unsigned long addr, pte_t *pte, int nr,
1476 			      struct zap_details *details, pte_t pteval)
1477 {
1478 	bool was_installed = false;
1479 
1480 #ifdef CONFIG_PTE_MARKER_UFFD_WP
1481 	/* Zap on anonymous always means dropping everything */
1482 	if (vma_is_anonymous(vma))
1483 		return false;
1484 
1485 	if (zap_drop_markers(details))
1486 		return false;
1487 
1488 	for (;;) {
1489 		/* the PFN in the PTE is irrelevant. */
1490 		if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
1491 			was_installed = true;
1492 		if (--nr == 0)
1493 			break;
1494 		pte++;
1495 		addr += PAGE_SIZE;
1496 	}
1497 #endif
1498 	return was_installed;
1499 }
1500 
1501 static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
1502 		struct vm_area_struct *vma, struct folio *folio,
1503 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
1504 		unsigned long addr, struct zap_details *details, int *rss,
1505 		bool *force_flush, bool *force_break, bool *any_skipped)
1506 {
1507 	struct mm_struct *mm = tlb->mm;
1508 	bool delay_rmap = false;
1509 
1510 	if (!folio_test_anon(folio)) {
1511 		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1512 		if (pte_dirty(ptent)) {
1513 			folio_mark_dirty(folio);
1514 			if (tlb_delay_rmap(tlb)) {
1515 				delay_rmap = true;
1516 				*force_flush = true;
1517 			}
1518 		}
1519 		if (pte_young(ptent) && likely(vma_has_recency(vma)))
1520 			folio_mark_accessed(folio);
1521 		rss[mm_counter(folio)] -= nr;
1522 	} else {
1523 		/* We don't need up-to-date accessed/dirty bits. */
1524 		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1525 		rss[MM_ANONPAGES] -= nr;
1526 	}
1527 	/* Checking a single PTE in a batch is sufficient. */
1528 	arch_check_zapped_pte(vma, ptent);
1529 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
1530 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
1531 		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
1532 							     nr, details, ptent);
1533 
1534 	if (!delay_rmap) {
1535 		folio_remove_rmap_ptes(folio, page, nr, vma);
1536 
1537 		if (unlikely(folio_mapcount(folio) < 0))
1538 			print_bad_pte(vma, addr, ptent, page);
1539 	}
1540 	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
1541 		*force_flush = true;
1542 		*force_break = true;
1543 	}
1544 }
1545 
1546 /*
1547  * Zap or skip at least one present PTE, trying to batch-process subsequent
1548  * PTEs that map consecutive pages of the same folio.
1549  *
1550  * Returns the number of processed (skipped or zapped) PTEs (at least 1).
1551  */
1552 static inline int zap_present_ptes(struct mmu_gather *tlb,
1553 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1554 		unsigned int max_nr, unsigned long addr,
1555 		struct zap_details *details, int *rss, bool *force_flush,
1556 		bool *force_break, bool *any_skipped)
1557 {
1558 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
1559 	struct mm_struct *mm = tlb->mm;
1560 	struct folio *folio;
1561 	struct page *page;
1562 	int nr;
1563 
1564 	page = vm_normal_page(vma, addr, ptent);
1565 	if (!page) {
1566 		/* We don't need up-to-date accessed/dirty bits. */
1567 		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
1568 		arch_check_zapped_pte(vma, ptent);
1569 		tlb_remove_tlb_entry(tlb, pte, addr);
1570 		if (userfaultfd_pte_wp(vma, ptent))
1571 			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
1572 						pte, 1, details, ptent);
1573 		ksm_might_unmap_zero_page(mm, ptent);
1574 		return 1;
1575 	}
1576 
1577 	folio = page_folio(page);
1578 	if (unlikely(!should_zap_folio(details, folio))) {
1579 		*any_skipped = true;
1580 		return 1;
1581 	}
1582 
1583 	/*
1584 	 * Make sure that the common "small folio" case is as fast as possible
1585 	 * by keeping the batching logic separate.
1586 	 */
1587 	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
1588 		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
1589 				     NULL, NULL, NULL);
1590 
1591 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
1592 				       addr, details, rss, force_flush,
1593 				       force_break, any_skipped);
1594 		return nr;
1595 	}
1596 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
1597 			       details, rss, force_flush, force_break, any_skipped);
1598 	return 1;
1599 }
1600 
1601 static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
1602 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1603 		unsigned int max_nr, unsigned long addr,
1604 		struct zap_details *details, int *rss, bool *any_skipped)
1605 {
1606 	swp_entry_t entry;
1607 	int nr = 1;
1608 
1609 	*any_skipped = true;
1610 	entry = pte_to_swp_entry(ptent);
1611 	if (is_device_private_entry(entry) ||
1612 		is_device_exclusive_entry(entry)) {
1613 		struct page *page = pfn_swap_entry_to_page(entry);
1614 		struct folio *folio = page_folio(page);
1615 
1616 		if (unlikely(!should_zap_folio(details, folio)))
1617 			return 1;
1618 		/*
1619 		 * Both device private/exclusive mappings should only
1620 		 * work with anonymous pages so far, so we don't need to
1621 		 * consider the uffd-wp bit when zapping. For more information,
1622 		 * see zap_install_uffd_wp_if_needed().
1623 		 */
1624 		WARN_ON_ONCE(!vma_is_anonymous(vma));
1625 		rss[mm_counter(folio)]--;
1626 		if (is_device_private_entry(entry))
1627 			folio_remove_rmap_pte(folio, page, vma);
1628 		folio_put(folio);
1629 	} else if (!non_swap_entry(entry)) {
1630 		/* Genuine swap entries, hence private anon pages */
1631 		if (!should_zap_cows(details))
1632 			return 1;
1633 
1634 		nr = swap_pte_batch(pte, max_nr, ptent);
1635 		rss[MM_SWAPENTS] -= nr;
1636 		free_swap_and_cache_nr(entry, nr);
1637 	} else if (is_migration_entry(entry)) {
1638 		struct folio *folio = pfn_swap_entry_folio(entry);
1639 
1640 		if (!should_zap_folio(details, folio))
1641 			return 1;
1642 		rss[mm_counter(folio)]--;
1643 	} else if (pte_marker_entry_uffd_wp(entry)) {
1644 		/*
1645 		 * For anon: always drop the marker; for file: only
1646 		 * drop the marker if explicitly requested.
1647 		 */
1648 		if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
1649 			return 1;
1650 	} else if (is_guard_swp_entry(entry)) {
1651 		/*
1652 		 * Ordinary zapping should not remove guard PTE
1653 		 * markers. Only do so if we should remove PTE markers
1654 		 * in general.
1655 		 */
1656 		if (!zap_drop_markers(details))
1657 			return 1;
1658 	} else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
1659 		if (!should_zap_cows(details))
1660 			return 1;
1661 	} else {
1662 		/* We should have covered all the swap entry types */
1663 		pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
1664 		WARN_ON_ONCE(1);
1665 	}
1666 	clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
1667 	*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
1668 
1669 	return nr;
1670 }
1671 
1672 static inline int do_zap_pte_range(struct mmu_gather *tlb,
1673 				   struct vm_area_struct *vma, pte_t *pte,
1674 				   unsigned long addr, unsigned long end,
1675 				   struct zap_details *details, int *rss,
1676 				   bool *force_flush, bool *force_break,
1677 				   bool *any_skipped)
1678 {
1679 	pte_t ptent = ptep_get(pte);
1680 	int max_nr = (end - addr) / PAGE_SIZE;
1681 	int nr = 0;
1682 
1683 	/* Skip all consecutive none ptes */
1684 	if (pte_none(ptent)) {
1685 		for (nr = 1; nr < max_nr; nr++) {
1686 			ptent = ptep_get(pte + nr);
1687 			if (!pte_none(ptent))
1688 				break;
1689 		}
1690 		max_nr -= nr;
1691 		if (!max_nr)
1692 			return nr;
1693 		pte += nr;
1694 		addr += nr * PAGE_SIZE;
1695 	}
1696 
1697 	if (pte_present(ptent))
1698 		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
1699 				       details, rss, force_flush, force_break,
1700 				       any_skipped);
1701 	else
1702 		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
1703 					  details, rss, any_skipped);
1704 
1705 	return nr;
1706 }
1707 
1708 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1709 				struct vm_area_struct *vma, pmd_t *pmd,
1710 				unsigned long addr, unsigned long end,
1711 				struct zap_details *details)
1712 {
1713 	bool force_flush = false, force_break = false;
1714 	struct mm_struct *mm = tlb->mm;
1715 	int rss[NR_MM_COUNTERS];
1716 	spinlock_t *ptl;
1717 	pte_t *start_pte;
1718 	pte_t *pte;
1719 	pmd_t pmdval;
1720 	unsigned long start = addr;
1721 	bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
1722 	bool direct_reclaim = false;
1723 	int nr;
1724 
1725 retry:
1726 	tlb_change_page_size(tlb, PAGE_SIZE);
1727 	init_rss_vec(rss);
1728 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1729 	if (!pte)
1730 		return addr;
1731 
1732 	flush_tlb_batched_pending(mm);
1733 	arch_enter_lazy_mmu_mode();
1734 	do {
1735 		bool any_skipped = false;
1736 
1737 		if (need_resched())
1738 			break;
1739 
1740 		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
1741 				      &force_flush, &force_break, &any_skipped);
1742 		if (any_skipped)
1743 			can_reclaim_pt = false;
1744 		if (unlikely(force_break)) {
1745 			addr += nr * PAGE_SIZE;
1746 			break;
1747 		}
1748 	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
1749 
1750 	if (can_reclaim_pt && addr == end)
1751 		direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
1752 
1753 	add_mm_rss_vec(mm, rss);
1754 	arch_leave_lazy_mmu_mode();
1755 
1756 	/* Do the actual TLB flush before dropping ptl */
1757 	if (force_flush) {
1758 		tlb_flush_mmu_tlbonly(tlb);
1759 		tlb_flush_rmaps(tlb, vma);
1760 	}
1761 	pte_unmap_unlock(start_pte, ptl);
1762 
1763 	/*
1764 	 * If we forced a TLB flush (either due to running out of
1765 	 * batch buffers or because we needed to flush dirty TLB
1766 	 * entries before releasing the ptl), free the batched
1767 	 * memory too. Come back again if we didn't do everything.
1768 	 */
1769 	if (force_flush)
1770 		tlb_flush_mmu(tlb);
1771 
1772 	if (addr != end) {
1773 		cond_resched();
1774 		force_flush = false;
1775 		force_break = false;
1776 		goto retry;
1777 	}
1778 
1779 	if (can_reclaim_pt) {
1780 		if (direct_reclaim)
1781 			free_pte(mm, start, tlb, pmdval);
1782 		else
1783 			try_to_free_pte(mm, pmd, start, tlb);
1784 	}
1785 
1786 	return addr;
1787 }
1788 
1789 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1790 				struct vm_area_struct *vma, pud_t *pud,
1791 				unsigned long addr, unsigned long end,
1792 				struct zap_details *details)
1793 {
1794 	pmd_t *pmd;
1795 	unsigned long next;
1796 
1797 	pmd = pmd_offset(pud, addr);
1798 	do {
1799 		next = pmd_addr_end(addr, end);
1800 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1801 			if (next - addr != HPAGE_PMD_SIZE)
1802 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1803 			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1804 				addr = next;
1805 				continue;
1806 			}
1807 			/* fall through */
1808 		} else if (details && details->single_folio &&
1809 			   folio_test_pmd_mappable(details->single_folio) &&
1810 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1811 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1812 			/*
1813 			 * Take and drop THP pmd lock so that we cannot return
1814 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1815 			 * but not yet decremented compound_mapcount().
1816 			 */
1817 			spin_unlock(ptl);
1818 		}
1819 		if (pmd_none(*pmd)) {
1820 			addr = next;
1821 			continue;
1822 		}
1823 		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1824 		if (addr != next)
1825 			pmd--;
1826 	} while (pmd++, cond_resched(), addr != end);
1827 
1828 	return addr;
1829 }
1830 
1831 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1832 				struct vm_area_struct *vma, p4d_t *p4d,
1833 				unsigned long addr, unsigned long end,
1834 				struct zap_details *details)
1835 {
1836 	pud_t *pud;
1837 	unsigned long next;
1838 
1839 	pud = pud_offset(p4d, addr);
1840 	do {
1841 		next = pud_addr_end(addr, end);
1842 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1843 			if (next - addr != HPAGE_PUD_SIZE) {
1844 				mmap_assert_locked(tlb->mm);
1845 				split_huge_pud(vma, pud, addr);
1846 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1847 				goto next;
1848 			/* fall through */
1849 		}
1850 		if (pud_none_or_clear_bad(pud))
1851 			continue;
1852 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1853 next:
1854 		cond_resched();
1855 	} while (pud++, addr = next, addr != end);
1856 
1857 	return addr;
1858 }
1859 
1860 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1861 				struct vm_area_struct *vma, pgd_t *pgd,
1862 				unsigned long addr, unsigned long end,
1863 				struct zap_details *details)
1864 {
1865 	p4d_t *p4d;
1866 	unsigned long next;
1867 
1868 	p4d = p4d_offset(pgd, addr);
1869 	do {
1870 		next = p4d_addr_end(addr, end);
1871 		if (p4d_none_or_clear_bad(p4d))
1872 			continue;
1873 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1874 	} while (p4d++, addr = next, addr != end);
1875 
1876 	return addr;
1877 }
1878 
1879 void unmap_page_range(struct mmu_gather *tlb,
1880 			     struct vm_area_struct *vma,
1881 			     unsigned long addr, unsigned long end,
1882 			     struct zap_details *details)
1883 {
1884 	pgd_t *pgd;
1885 	unsigned long next;
1886 
1887 	BUG_ON(addr >= end);
1888 	tlb_start_vma(tlb, vma);
1889 	pgd = pgd_offset(vma->vm_mm, addr);
1890 	do {
1891 		next = pgd_addr_end(addr, end);
1892 		if (pgd_none_or_clear_bad(pgd))
1893 			continue;
1894 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1895 	} while (pgd++, addr = next, addr != end);
1896 	tlb_end_vma(tlb, vma);
1897 }
1898 
1899 
1900 static void unmap_single_vma(struct mmu_gather *tlb,
1901 		struct vm_area_struct *vma, unsigned long start_addr,
1902 		unsigned long end_addr,
1903 		struct zap_details *details, bool mm_wr_locked)
1904 {
1905 	unsigned long start = max(vma->vm_start, start_addr);
1906 	unsigned long end;
1907 
1908 	if (start >= vma->vm_end)
1909 		return;
1910 	end = min(vma->vm_end, end_addr);
1911 	if (end <= vma->vm_start)
1912 		return;
1913 
1914 	if (vma->vm_file)
1915 		uprobe_munmap(vma, start, end);
1916 
1917 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1918 		untrack_pfn(vma, 0, 0, mm_wr_locked);
1919 
1920 	if (start != end) {
1921 		if (unlikely(is_vm_hugetlb_page(vma))) {
1922 			/*
1923 			 * It is undesirable to test vma->vm_file as it
1924 			 * should be non-NULL for a valid hugetlb area.
1925 			 * However, vm_file will be NULL in the error
1926 			 * cleanup path of mmap_region. When
1927 			 * hugetlbfs ->mmap method fails,
1928 			 * mmap_region() nullifies vma->vm_file
1929 			 * before calling this function to clean up.
1930 			 * Since no pte has actually been set up, it is
1931 			 * safe to do nothing in this case.
1932 			 */
1933 			if (vma->vm_file) {
1934 				zap_flags_t zap_flags = details ?
1935 				    details->zap_flags : 0;
1936 				__unmap_hugepage_range(tlb, vma, start, end,
1937 							     NULL, zap_flags);
1938 			}
1939 		} else
1940 			unmap_page_range(tlb, vma, start, end, details);
1941 	}
1942 }
1943 
1944 /**
1945  * unmap_vmas - unmap a range of memory covered by a list of vma's
1946  * @tlb: address of the caller's struct mmu_gather
1947  * @mas: the maple state
1948  * @vma: the starting vma
1949  * @start_addr: virtual address at which to start unmapping
1950  * @end_addr: virtual address at which to end unmapping
1951  * @tree_end: The maximum index to check
1952  * @mm_wr_locked: whether the mmap_lock is held for writing
1953  *
1954  * Unmap all pages in the vma list.
1955  *
1956  * Only addresses between @start_addr and @end_addr will be unmapped.
1957  *
1958  * The VMA list must be sorted in ascending virtual address order.
1959  *
1960  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1961  * range after unmap_vmas() returns.  So the only responsibility here is to
1962  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1963  * drops the lock and schedules.
1964  */
1965 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1966 		struct vm_area_struct *vma, unsigned long start_addr,
1967 		unsigned long end_addr, unsigned long tree_end,
1968 		bool mm_wr_locked)
1969 {
1970 	struct mmu_notifier_range range;
1971 	struct zap_details details = {
1972 		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
1973 		/* Careful - we need to zap private pages too! */
1974 		.even_cows = true,
1975 	};
1976 
1977 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1978 				start_addr, end_addr);
1979 	mmu_notifier_invalidate_range_start(&range);
1980 	do {
1981 		unsigned long start = start_addr;
1982 		unsigned long end = end_addr;
1983 		hugetlb_zap_begin(vma, &start, &end);
1984 		unmap_single_vma(tlb, vma, start, end, &details,
1985 				 mm_wr_locked);
1986 		hugetlb_zap_end(vma, &details);
1987 		vma = mas_find(mas, tree_end - 1);
1988 	} while (vma && likely(!xa_is_zero(vma)));
1989 	mmu_notifier_invalidate_range_end(&range);
1990 }
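
/*
 * Hypothetical sketch (not part of this file): the caller contract
 * described above, in reduced form. An exit_mmap()-style caller wraps
 * unmap_vmas() in an mmu_gather so that tlb_finish_mmu() performs the
 * final flush of the whole unmapped range. The maple-state setup and
 * the "example_" names are illustrative assumptions, not kernel code.
 */
static void __maybe_unused example_unmap_all(struct mm_struct *mm,
					     struct vm_area_struct *first_vma)
{
	struct mmu_gather tlb;
	MA_STATE(mas, &mm->mm_mt, first_vma->vm_end, first_vma->vm_end);

	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, &mas, first_vma, 0, ULONG_MAX, ULONG_MAX, false);
	tlb_finish_mmu(&tlb);	/* flushes the whole unmapped range */
}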
1991 
1992 /**
1993  * zap_page_range_single - remove user pages in a given range
1994  * @vma: vm_area_struct holding the applicable pages
1995  * @address: starting address of pages to zap
1996  * @size: number of bytes to zap
1997  * @details: details of shared cache invalidation
1998  *
1999  * The range must fit into one VMA.
2000  */
2001 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2002 		unsigned long size, struct zap_details *details)
2003 {
2004 	const unsigned long end = address + size;
2005 	struct mmu_notifier_range range;
2006 	struct mmu_gather tlb;
2007 
2008 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2009 				address, end);
2010 	hugetlb_zap_begin(vma, &range.start, &range.end);
2011 	tlb_gather_mmu(&tlb, vma->vm_mm);
2012 	update_hiwater_rss(vma->vm_mm);
2013 	mmu_notifier_invalidate_range_start(&range);
2014 	/*
2015 	 * unmap 'address-end' not 'range.start-range.end' as range
2016 	 * could have been expanded for hugetlb pmd sharing.
2017 	 */
2018 	unmap_single_vma(&tlb, vma, address, end, details, false);
2019 	mmu_notifier_invalidate_range_end(&range);
2020 	tlb_finish_mmu(&tlb);
2021 	hugetlb_zap_end(vma, details);
2022 }
2023 
2024 /**
2025  * zap_vma_ptes - remove ptes mapping the vma
2026  * @vma: vm_area_struct holding ptes to be zapped
2027  * @address: starting address of pages to zap
2028  * @size: number of bytes to zap
2029  *
2030  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
2031  *
2032  * The entire address range must be fully contained within the vma.
2033  *
2034  */
2035 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2036 		unsigned long size)
2037 {
2038 	if (!range_in_vma(vma, address, address + size) ||
2039 	    !(vma->vm_flags & VM_PFNMAP))
2040 		return;
2041 
2042 	zap_page_range_single(vma, address, size, NULL);
2043 }
2044 EXPORT_SYMBOL_GPL(zap_vma_ptes);
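
/*
 * Hypothetical usage sketch (not part of this file): a driver that
 * established PTEs in a VM_PFNMAP vma can drop them again before
 * recycling the underlying buffer. "example_buf" and its fields are
 * illustrative only.
 */
struct example_buf {
	struct vm_area_struct *vma;	/* vma the buffer is mapped into */
	unsigned long size;		/* mapped bytes, page aligned */
};

static void __maybe_unused example_buf_unmap(struct example_buf *buf)
{
	/* Silently does nothing unless the range is inside a VM_PFNMAP vma. */
	zap_vma_ptes(buf->vma, buf->vma->vm_start, buf->size);
}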
2045 
2046 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
2047 {
2048 	pgd_t *pgd;
2049 	p4d_t *p4d;
2050 	pud_t *pud;
2051 	pmd_t *pmd;
2052 
2053 	pgd = pgd_offset(mm, addr);
2054 	p4d = p4d_alloc(mm, pgd, addr);
2055 	if (!p4d)
2056 		return NULL;
2057 	pud = pud_alloc(mm, p4d, addr);
2058 	if (!pud)
2059 		return NULL;
2060 	pmd = pmd_alloc(mm, pud, addr);
2061 	if (!pmd)
2062 		return NULL;
2063 
2064 	VM_BUG_ON(pmd_trans_huge(*pmd));
2065 	return pmd;
2066 }
2067 
2068 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2069 			spinlock_t **ptl)
2070 {
2071 	pmd_t *pmd = walk_to_pmd(mm, addr);
2072 
2073 	if (!pmd)
2074 		return NULL;
2075 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
2076 }
2077 
2078 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
2079 {
2080 	VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
2081 	/*
2082 	 * Whoever wants to forbid the zeropage, after some zeropages
2083 	 * might already have been mapped, has to scan the page tables and
2084 	 * bail out on any zeropages. Zeropages in COW mappings can
2085 	 * be unshared using FAULT_FLAG_UNSHARE faults.
2086 	 */
2087 	if (mm_forbids_zeropage(vma->vm_mm))
2088 		return false;
2089 	/* zeropages in COW mappings are common and unproblematic. */
2090 	if (is_cow_mapping(vma->vm_flags))
2091 		return true;
2092 	/* Mappings that do not allow for writable PTEs are unproblematic. */
2093 	if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
2094 		return true;
2095 	/*
2096 	 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
2097 	 * find the shared zeropage and longterm-pin it, which would
2098 	 * be problematic as soon as the zeropage gets replaced by a different
2099 	 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
2100  * now differ from what GUP looked up. FSDAX is incompatible with
2101  * FOLL_LONGTERM, and VM_IO is completely incompatible with GUP (see
2102 	 * check_vma_flags).
2103 	 */
2104 	return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
2105 	       (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
2106 }
2107 
2108 static int validate_page_before_insert(struct vm_area_struct *vma,
2109 				       struct page *page)
2110 {
2111 	struct folio *folio = page_folio(page);
2112 
2113 	if (!folio_ref_count(folio))
2114 		return -EINVAL;
2115 	if (unlikely(is_zero_folio(folio))) {
2116 		if (!vm_mixed_zeropage_allowed(vma))
2117 			return -EINVAL;
2118 		return 0;
2119 	}
2120 	if (folio_test_anon(folio) || folio_test_slab(folio) ||
2121 	    page_has_type(page))
2122 		return -EINVAL;
2123 	flush_dcache_folio(folio);
2124 	return 0;
2125 }
2126 
2127 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
2128 			unsigned long addr, struct page *page, pgprot_t prot)
2129 {
2130 	struct folio *folio = page_folio(page);
2131 	pte_t pteval;
2132 
2133 	if (!pte_none(ptep_get(pte)))
2134 		return -EBUSY;
2135 	/* Ok, finally just insert the thing.. */
2136 	pteval = mk_pte(page, prot);
2137 	if (unlikely(is_zero_folio(folio))) {
2138 		pteval = pte_mkspecial(pteval);
2139 	} else {
2140 		folio_get(folio);
2141 		inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
2142 		folio_add_file_rmap_pte(folio, page, vma);
2143 	}
2144 	set_pte_at(vma->vm_mm, addr, pte, pteval);
2145 	return 0;
2146 }
2147 
2148 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2149 			struct page *page, pgprot_t prot)
2150 {
2151 	int retval;
2152 	pte_t *pte;
2153 	spinlock_t *ptl;
2154 
2155 	retval = validate_page_before_insert(vma, page);
2156 	if (retval)
2157 		goto out;
2158 	retval = -ENOMEM;
2159 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
2160 	if (!pte)
2161 		goto out;
2162 	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
2163 	pte_unmap_unlock(pte, ptl);
2164 out:
2165 	return retval;
2166 }
2167 
2168 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
2169 			unsigned long addr, struct page *page, pgprot_t prot)
2170 {
2171 	int err;
2172 
2173 	err = validate_page_before_insert(vma, page);
2174 	if (err)
2175 		return err;
2176 	return insert_page_into_pte_locked(vma, pte, addr, page, prot);
2177 }
2178 
2179 /* insert_pages() amortizes the cost of spinlock operations
2180  * when inserting pages in a loop.
2181  */
2182 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
2183 			struct page **pages, unsigned long *num, pgprot_t prot)
2184 {
2185 	pmd_t *pmd = NULL;
2186 	pte_t *start_pte, *pte;
2187 	spinlock_t *pte_lock;
2188 	struct mm_struct *const mm = vma->vm_mm;
2189 	unsigned long curr_page_idx = 0;
2190 	unsigned long remaining_pages_total = *num;
2191 	unsigned long pages_to_write_in_pmd;
2192 	int ret;
2193 more:
2194 	ret = -EFAULT;
2195 	pmd = walk_to_pmd(mm, addr);
2196 	if (!pmd)
2197 		goto out;
2198 
2199 	pages_to_write_in_pmd = min_t(unsigned long,
2200 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
2201 
2202 	/* Allocate the PTE if necessary; takes PMD lock once only. */
2203 	ret = -ENOMEM;
2204 	if (pte_alloc(mm, pmd))
2205 		goto out;
2206 
2207 	while (pages_to_write_in_pmd) {
2208 		int pte_idx = 0;
2209 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
2210 
2211 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2212 		if (!start_pte) {
2213 			ret = -EFAULT;
2214 			goto out;
2215 		}
2216 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
2217 			int err = insert_page_in_batch_locked(vma, pte,
2218 				addr, pages[curr_page_idx], prot);
2219 			if (unlikely(err)) {
2220 				pte_unmap_unlock(start_pte, pte_lock);
2221 				ret = err;
2222 				remaining_pages_total -= pte_idx;
2223 				goto out;
2224 			}
2225 			addr += PAGE_SIZE;
2226 			++curr_page_idx;
2227 		}
2228 		pte_unmap_unlock(start_pte, pte_lock);
2229 		pages_to_write_in_pmd -= batch_size;
2230 		remaining_pages_total -= batch_size;
2231 	}
2232 	if (remaining_pages_total)
2233 		goto more;
2234 	ret = 0;
2235 out:
2236 	*num = remaining_pages_total;
2237 	return ret;
2238 }
2239 
2240 /**
2241  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2242  * @vma: user vma to map to
2243  * @addr: target start user address of these pages
2244  * @pages: source kernel pages
2245  * @num: in: number of pages to map. out: number of pages that were *not*
2246  * mapped. (0 means all pages were successfully mapped).
2247  *
2248  * Preferred over vm_insert_page() when inserting multiple pages.
2249  *
2250  * In case of error, we may have mapped a subset of the provided
2251  * pages. It is the caller's responsibility to account for this case.
2252  *
2253  * The same restrictions apply as in vm_insert_page().
2254  */
2255 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2256 			struct page **pages, unsigned long *num)
2257 {
2258 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
2259 
2260 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
2261 		return -EFAULT;
2262 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2263 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2264 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2265 		vm_flags_set(vma, VM_MIXEDMAP);
2266 	}
2267 	/* Defer page refcount checking till we're about to map that page. */
2268 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
2269 }
2270 EXPORT_SYMBOL(vm_insert_pages);
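
/*
 * Hypothetical usage sketch (not part of this file): batch-inserting
 * pages from an mmap handler. On return, *num holds how many pages
 * were NOT mapped, so a caller can account for partial success. The
 * "example_" names are illustrative.
 */
static int __maybe_unused example_insert_batch(struct vm_area_struct *vma,
					       struct page **pages,
					       unsigned long npages)
{
	unsigned long remaining = npages;
	int err;

	err = vm_insert_pages(vma, vma->vm_start, pages, &remaining);
	if (err)
		/* A prefix of the batch may already be mapped. */
		pr_debug("mapped %lu of %lu pages\n",
			 npages - remaining, npages);
	return err;
}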
2271 
2272 /**
2273  * vm_insert_page - insert single page into user vma
2274  * @vma: user vma to map to
2275  * @addr: target user address of this page
2276  * @page: source kernel page
2277  *
2278  * This allows drivers to insert individual pages they've allocated
2279  * into a user vma. The zeropage is supported in some VMAs,
2280  * see vm_mixed_zeropage_allowed().
2281  *
2282  * The page has to be a nice clean _individual_ kernel allocation.
2283  * If you allocate a compound page, you need to have marked it as
2284  * such (__GFP_COMP), or manually just split the page up yourself
2285  * (see split_page()).
2286  *
2287  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2288  * took an arbitrary page protection parameter. This doesn't allow
2289  * that. Your vma protection will have to be set up correctly, which
2290  * means that if you want a shared writable mapping, you'd better
2291  * ask for a shared writable mapping!
2292  *
2293  * The page does not need to be reserved.
2294  *
2295  * Usually this function is called from f_op->mmap() handler
2296  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2297  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2298  * function from other places, for example from page-fault handler.
2299  *
2300  * Return: %0 on success, negative error code otherwise.
2301  */
2302 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2303 			struct page *page)
2304 {
2305 	if (addr < vma->vm_start || addr >= vma->vm_end)
2306 		return -EFAULT;
2307 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2308 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2309 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2310 		vm_flags_set(vma, VM_MIXEDMAP);
2311 	}
2312 	return insert_page(vma, addr, page, vma->vm_page_prot);
2313 }
2314 EXPORT_SYMBOL(vm_insert_page);
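
/*
 * Hypothetical usage sketch (not part of this file): the typical call
 * site is an f_op->mmap() handler, where the mmap_lock is held for
 * writing and vm_flags may still be changed. "example_dev" and its
 * page array are illustrative; vm_map_pages() below packages this
 * loop, including vm_pgoff handling.
 */
struct example_dev {
	struct page **pages;	/* individually allocated kernel pages */
	unsigned long npages;
};

static int __maybe_unused example_dev_mmap(struct file *file,
					   struct vm_area_struct *vma)
{
	struct example_dev *dev = file->private_data;
	unsigned long addr = vma->vm_start;
	unsigned long i;
	int err;

	if (vma_pages(vma) > dev->npages)
		return -ENXIO;

	for (i = 0; i < vma_pages(vma); i++, addr += PAGE_SIZE) {
		err = vm_insert_page(vma, addr, dev->pages[i]);
		if (err)
			return err;	/* mmap teardown unmaps the rest */
	}
	return 0;
}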
2315 
2316 /*
2317  * __vm_map_pages - maps range of kernel pages into user vma
2318  * @vma: user vma to map to
2319  * @pages: pointer to array of source kernel pages
2320  * @num: number of pages in page array
2321  * @offset: user's requested vm_pgoff
2322  *
2323  * This allows drivers to map range of kernel pages into a user vma.
2324  * The zeropage is supported in some VMAs, see
2325  * vm_mixed_zeropage_allowed().
2326  *
2327  * Return: 0 on success and error code otherwise.
2328  */
2329 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2330 				unsigned long num, unsigned long offset)
2331 {
2332 	unsigned long count = vma_pages(vma);
2333 	unsigned long uaddr = vma->vm_start;
2334 	int ret, i;
2335 
2336 	/* Fail if the user-requested offset is beyond the end of the object */
2337 	if (offset >= num)
2338 		return -ENXIO;
2339 
2340 	/* Fail if the user-requested size exceeds the available object size */
2341 	if (count > num - offset)
2342 		return -ENXIO;
2343 
2344 	for (i = 0; i < count; i++) {
2345 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2346 		if (ret < 0)
2347 			return ret;
2348 		uaddr += PAGE_SIZE;
2349 	}
2350 
2351 	return 0;
2352 }
2353 
2354 /**
2355  * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2356  * @vma: user vma to map to
2357  * @pages: pointer to array of source kernel pages
2358  * @num: number of pages in page array
2359  *
2360  * Maps an object consisting of @num pages, catering for the user's
2361  * requested vm_pgoff.
2362  *
2363  * If we fail to insert any page into the vma, the function will return
2364  * immediately leaving any previously inserted pages present.  Callers
2365  * from the mmap handler may immediately return the error as their caller
2366  * will destroy the vma, removing any successfully inserted pages. Other
2367  * callers should make their own arrangements for calling unmap_region().
2368  *
2369  * Context: Process context. Called by mmap handlers.
2370  * Return: 0 on success and error code otherwise.
2371  */
2372 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2373 				unsigned long num)
2374 {
2375 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2376 }
2377 EXPORT_SYMBOL(vm_map_pages);
2378 
2379 /**
2380  * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2381  * @vma: user vma to map to
2382  * @pages: pointer to array of source kernel pages
2383  * @num: number of pages in page array
2384  *
2385  * Similar to vm_map_pages(), except that it explicitly sets the offset
2386  * to 0. This function is intended for the drivers that did not consider
2387  * to 0. This function is intended for drivers that do not consider
2388  *
2389  * Context: Process context. Called by mmap handlers.
2390  * Return: 0 on success and error code otherwise.
2391  */
2392 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2393 				unsigned long num)
2394 {
2395 	return __vm_map_pages(vma, pages, num, 0);
2396 }
2397 EXPORT_SYMBOL(vm_map_pages_zero);
2398 
2399 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2400 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2401 {
2402 	struct mm_struct *mm = vma->vm_mm;
2403 	pte_t *pte, entry;
2404 	spinlock_t *ptl;
2405 
2406 	pte = get_locked_pte(mm, addr, &ptl);
2407 	if (!pte)
2408 		return VM_FAULT_OOM;
2409 	entry = ptep_get(pte);
2410 	if (!pte_none(entry)) {
2411 		if (mkwrite) {
2412 			/*
2413 			 * For read faults on private mappings the PFN passed
2414 			 * in may not match the PFN we have mapped if the
2415 			 * mapped PFN is a writeable COW page.  In the mkwrite
2416 			 * case we are creating a writable PTE for a shared
2417 			 * mapping and we expect the PFNs to match. If they
2418 			 * don't match, we are likely racing with block
2419 			 * allocation and mapping invalidation so just skip the
2420 			 * update.
2421 			 */
2422 			if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2423 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2424 				goto out_unlock;
2425 			}
2426 			entry = pte_mkyoung(entry);
2427 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2428 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2429 				update_mmu_cache(vma, addr, pte);
2430 		}
2431 		goto out_unlock;
2432 	}
2433 
2434 	/* Ok, finally just insert the thing.. */
2435 	if (pfn_t_devmap(pfn))
2436 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2437 	else
2438 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2439 
2440 	if (mkwrite) {
2441 		entry = pte_mkyoung(entry);
2442 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2443 	}
2444 
2445 	set_pte_at(mm, addr, pte, entry);
2446 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2447 
2448 out_unlock:
2449 	pte_unmap_unlock(pte, ptl);
2450 	return VM_FAULT_NOPAGE;
2451 }
2452 
2453 /**
2454  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2455  * @vma: user vma to map to
2456  * @addr: target user address of this page
2457  * @pfn: source kernel pfn
2458  * @pgprot: pgprot flags for the inserted page
2459  *
2460  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2461  * to override pgprot on a per-page basis.
2462  *
2463  * This only makes sense for IO mappings, and it makes no sense for
2464  * COW mappings.  In general, using multiple vmas is preferable;
2465  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2466  * impractical.
2467  *
2468  * pgprot typically only differs from @vma->vm_page_prot when drivers set
2469  * caching- and encryption bits different from those of @vma->vm_page_prot,
2470  * because the caching- or encryption mode may not be known at mmap() time.
2471  *
2472  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2473  * to set caching and encryption bits for those vmas (except for COW pages).
2474  * This is ensured by core vm only modifying these page table entries using
2475  * functions that don't touch caching- or encryption bits, using pte_modify()
2476  * if needed. (See for example mprotect()).
2477  *
2478  * Also when new page-table entries are created, this is only done using the
2479  * fault() callback, and never using the value of vma->vm_page_prot,
2480  * except for page-table entries that point to anonymous pages as the result
2481  * of COW.
2482  *
2483  * Context: Process context.  May allocate using %GFP_KERNEL.
2484  * Return: vm_fault_t value.
2485  */
2486 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2487 			unsigned long pfn, pgprot_t pgprot)
2488 {
2489 	/*
2490 	 * Technically, architectures with pte_special can avoid all these
2491 	 * restrictions (same for remap_pfn_range).  However we would like
2492 	 * consistency in testing and feature parity among all, so we should
2493 	 * try to keep these invariants in place for everybody.
2494 	 */
2495 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2496 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2497 						(VM_PFNMAP|VM_MIXEDMAP));
2498 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2499 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2500 
2501 	if (addr < vma->vm_start || addr >= vma->vm_end)
2502 		return VM_FAULT_SIGBUS;
2503 
2504 	if (!pfn_modify_allowed(pfn, pgprot))
2505 		return VM_FAULT_SIGBUS;
2506 
2507 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2508 
2509 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2510 			false);
2511 }
2512 EXPORT_SYMBOL(vmf_insert_pfn_prot);
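
/*
 * Hypothetical usage sketch (not part of this file): overriding pgprot
 * per page, e.g. to get a write-combining mapping whose caching mode
 * was not yet known at mmap() time. The pfn lookup is assumed to have
 * been done by the caller; names are illustrative.
 */
static vm_fault_t __maybe_unused example_fault_wc(struct vm_fault *vmf,
						  unsigned long pfn)
{
	return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
			pgprot_writecombine(vmf->vma->vm_page_prot));
}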
2513 
2514 /**
2515  * vmf_insert_pfn - insert single pfn into user vma
2516  * @vma: user vma to map to
2517  * @addr: target user address of this page
2518  * @pfn: source kernel pfn
2519  *
2520  * Similar to vm_insert_page, this allows drivers to insert individual pages
2521  * they've allocated into a user vma. Same comments apply.
2522  *
2523  * This function should only be called from a vm_ops->fault handler, and
2524  * in that case the handler should return the result of this function.
2525  *
2526  * vma cannot be a COW mapping.
2527  *
2528  * As this is called only for pages that do not currently exist, we
2529  * do not need to flush old virtual caches or the TLB.
2530  *
2531  * Context: Process context.  May allocate using %GFP_KERNEL.
2532  * Return: vm_fault_t value.
2533  */
2534 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2535 			unsigned long pfn)
2536 {
2537 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2538 }
2539 EXPORT_SYMBOL(vmf_insert_pfn);
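
/*
 * Hypothetical usage sketch (not part of this file): a vm_ops->fault
 * handler for a VM_PFNMAP vma whose backing PFNs are contiguous from a
 * base recorded in vm_pgoff. The layout is an illustrative assumption;
 * real drivers derive the PFN however suits them.
 */
static vm_fault_t __maybe_unused example_pfnmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn = vma->vm_pgoff +
		((vmf->address - vma->vm_start) >> PAGE_SHIFT);

	/* The handler returns this result directly, as noted above. */
	return vmf_insert_pfn(vma, vmf->address, pfn);
}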
2540 
2541 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
2542 {
2543 	if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
2544 	    (mkwrite || !vm_mixed_zeropage_allowed(vma)))
2545 		return false;
2546 	/* these checks mirror the abort conditions in vm_normal_page */
2547 	if (vma->vm_flags & VM_MIXEDMAP)
2548 		return true;
2549 	if (pfn_t_devmap(pfn))
2550 		return true;
2551 	if (pfn_t_special(pfn))
2552 		return true;
2553 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2554 		return true;
2555 	return false;
2556 }
2557 
2558 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2559 		unsigned long addr, pfn_t pfn, bool mkwrite)
2560 {
2561 	pgprot_t pgprot = vma->vm_page_prot;
2562 	int err;
2563 
2564 	if (!vm_mixed_ok(vma, pfn, mkwrite))
2565 		return VM_FAULT_SIGBUS;
2566 
2567 	if (addr < vma->vm_start || addr >= vma->vm_end)
2568 		return VM_FAULT_SIGBUS;
2569 
2570 	track_pfn_insert(vma, &pgprot, pfn);
2571 
2572 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2573 		return VM_FAULT_SIGBUS;
2574 
2575 	/*
2576 	 * If we don't have pte special, then we have to use the pfn_valid()
2577 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2578 	 * refcount the page if pfn_valid is true (hence insert_page rather
2579 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2580  * without pte special, it would then be refcounted as a normal page.
2581 	 */
2582 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2583 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2584 		struct page *page;
2585 
2586 		/*
2587 		 * At this point we are committed to insert_page()
2588 		 * regardless of whether the caller specified flags that
2589 		 * result in pfn_t_has_page() == false.
2590 		 */
2591 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2592 		err = insert_page(vma, addr, page, pgprot);
2593 	} else {
2594 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2595 	}
2596 
2597 	if (err == -ENOMEM)
2598 		return VM_FAULT_OOM;
2599 	if (err < 0 && err != -EBUSY)
2600 		return VM_FAULT_SIGBUS;
2601 
2602 	return VM_FAULT_NOPAGE;
2603 }
2604 
2605 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2606 		pfn_t pfn)
2607 {
2608 	return __vm_insert_mixed(vma, addr, pfn, false);
2609 }
2610 EXPORT_SYMBOL(vmf_insert_mixed);
2611 
2612 /*
2613  *  If insertion of the PTE failed because someone else already added a
2614  *  different entry in the meantime, we treat that as success, as we assume
2615  *  the same entry was actually inserted.
2616  */
2617 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2618 		unsigned long addr, pfn_t pfn)
2619 {
2620 	return __vm_insert_mixed(vma, addr, pfn, true);
2621 }
2622 
2623 /*
2624  * Maps a range of physical memory into the requested pages. The old
2625  * mappings are removed. Any reference to a nonexistent page results
2626  * in a null mapping (currently treated as "copy-on-access").
2627  */
2628 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2629 			unsigned long addr, unsigned long end,
2630 			unsigned long pfn, pgprot_t prot)
2631 {
2632 	pte_t *pte, *mapped_pte;
2633 	spinlock_t *ptl;
2634 	int err = 0;
2635 
2636 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2637 	if (!pte)
2638 		return -ENOMEM;
2639 	arch_enter_lazy_mmu_mode();
2640 	do {
2641 		BUG_ON(!pte_none(ptep_get(pte)));
2642 		if (!pfn_modify_allowed(pfn, prot)) {
2643 			err = -EACCES;
2644 			break;
2645 		}
2646 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2647 		pfn++;
2648 	} while (pte++, addr += PAGE_SIZE, addr != end);
2649 	arch_leave_lazy_mmu_mode();
2650 	pte_unmap_unlock(mapped_pte, ptl);
2651 	return err;
2652 }
2653 
2654 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2655 			unsigned long addr, unsigned long end,
2656 			unsigned long pfn, pgprot_t prot)
2657 {
2658 	pmd_t *pmd;
2659 	unsigned long next;
2660 	int err;
2661 
2662 	pfn -= addr >> PAGE_SHIFT;
2663 	pmd = pmd_alloc(mm, pud, addr);
2664 	if (!pmd)
2665 		return -ENOMEM;
2666 	VM_BUG_ON(pmd_trans_huge(*pmd));
2667 	do {
2668 		next = pmd_addr_end(addr, end);
2669 		err = remap_pte_range(mm, pmd, addr, next,
2670 				pfn + (addr >> PAGE_SHIFT), prot);
2671 		if (err)
2672 			return err;
2673 	} while (pmd++, addr = next, addr != end);
2674 	return 0;
2675 }
2676 
2677 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2678 			unsigned long addr, unsigned long end,
2679 			unsigned long pfn, pgprot_t prot)
2680 {
2681 	pud_t *pud;
2682 	unsigned long next;
2683 	int err;
2684 
2685 	pfn -= addr >> PAGE_SHIFT;
2686 	pud = pud_alloc(mm, p4d, addr);
2687 	if (!pud)
2688 		return -ENOMEM;
2689 	do {
2690 		next = pud_addr_end(addr, end);
2691 		err = remap_pmd_range(mm, pud, addr, next,
2692 				pfn + (addr >> PAGE_SHIFT), prot);
2693 		if (err)
2694 			return err;
2695 	} while (pud++, addr = next, addr != end);
2696 	return 0;
2697 }
2698 
2699 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2700 			unsigned long addr, unsigned long end,
2701 			unsigned long pfn, pgprot_t prot)
2702 {
2703 	p4d_t *p4d;
2704 	unsigned long next;
2705 	int err;
2706 
2707 	pfn -= addr >> PAGE_SHIFT;
2708 	p4d = p4d_alloc(mm, pgd, addr);
2709 	if (!p4d)
2710 		return -ENOMEM;
2711 	do {
2712 		next = p4d_addr_end(addr, end);
2713 		err = remap_pud_range(mm, p4d, addr, next,
2714 				pfn + (addr >> PAGE_SHIFT), prot);
2715 		if (err)
2716 			return err;
2717 	} while (p4d++, addr = next, addr != end);
2718 	return 0;
2719 }
2720 
2721 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
2722 		unsigned long pfn, unsigned long size, pgprot_t prot)
2723 {
2724 	pgd_t *pgd;
2725 	unsigned long next;
2726 	unsigned long end = addr + PAGE_ALIGN(size);
2727 	struct mm_struct *mm = vma->vm_mm;
2728 	int err;
2729 
2730 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2731 		return -EINVAL;
2732 
2733 	/*
2734 	 * Physically remapped pages are special. Tell the
2735 	 * rest of the world about it:
2736 	 *   VM_IO tells people not to look at these pages
2737 	 *	(accesses can have side effects).
2738 	 *   VM_PFNMAP tells the core MM that the base pages are just
2739 	 *	raw PFN mappings, and do not have a "struct page" associated
2740 	 *	with them.
2741 	 *   VM_DONTEXPAND
2742 	 *      Disable vma merging and expanding with mremap().
2743 	 *   VM_DONTDUMP
2744 	 *      Omit vma from core dump, even when VM_IO turned off.
2745 	 *
2746 	 * There's a horrible special case to handle copy-on-write
2747 	 * behaviour that some programs depend on. We mark the "original"
2748 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2749 	 * See vm_normal_page() for details.
2750 	 */
2751 	if (is_cow_mapping(vma->vm_flags)) {
2752 		if (addr != vma->vm_start || end != vma->vm_end)
2753 			return -EINVAL;
2754 		vma->vm_pgoff = pfn;
2755 	}
2756 
2757 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
2758 
2759 	BUG_ON(addr >= end);
2760 	pfn -= addr >> PAGE_SHIFT;
2761 	pgd = pgd_offset(mm, addr);
2762 	flush_cache_range(vma, addr, end);
2763 	do {
2764 		next = pgd_addr_end(addr, end);
2765 		err = remap_p4d_range(mm, pgd, addr, next,
2766 				pfn + (addr >> PAGE_SHIFT), prot);
2767 		if (err)
2768 			return err;
2769 	} while (pgd++, addr = next, addr != end);
2770 
2771 	return 0;
2772 }
2773 
2774 /*
2775  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2776  * must have pre-validated the caching bits of the pgprot_t.
2777  */
2778 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2779 		unsigned long pfn, unsigned long size, pgprot_t prot)
2780 {
2781 	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
2782 
2783 	if (!error)
2784 		return 0;
2785 
2786 	/*
2787 	 * A partial pfn range mapping is dangerous: it does not
2788 	 * maintain page reference counts, and callers may free
2789 	 * pages due to the error. So zap it early.
2790 	 */
2791 	zap_page_range_single(vma, addr, size, NULL);
2792 	return error;
2793 }
2794 
2795 /**
2796  * remap_pfn_range - remap kernel memory to userspace
2797  * @vma: user vma to map to
2798  * @addr: target page aligned user address to start at
2799  * @pfn: page frame number of kernel physical memory address
2800  * @size: size of mapping area
2801  * @prot: page protection flags for this mapping
2802  *
2803  * Note: this is only safe if the mm semaphore is held when called.
2804  *
2805  * Return: %0 on success, negative error code otherwise.
2806  */
2807 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2808 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2809 {
2810 	int err;
2811 
2812 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2813 	if (err)
2814 		return -EINVAL;
2815 
2816 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2817 	if (err)
2818 		untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
2819 	return err;
2820 }
2821 EXPORT_SYMBOL(remap_pfn_range);
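
/*
 * Hypothetical usage sketch (not part of this file): the classic way
 * for a driver to expose a physically contiguous region from its
 * f_op->mmap() handler. The base address is made up; the mm semaphore
 * is held by the mmap() path, as required above.
 */
static int __maybe_unused example_mmap_phys(struct file *file,
					    struct vm_area_struct *vma)
{
	const phys_addr_t example_phys = 0xfd000000;	/* illustrative */
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       (example_phys >> PAGE_SHIFT) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}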
2822 
2823 /**
2824  * vm_iomap_memory - remap memory to userspace
2825  * @vma: user vma to map to
2826  * @start: start of the physical memory to be mapped
2827  * @len: size of area
2828  *
2829  * This is a simplified io_remap_pfn_range() for common driver use. The
2830  * driver just needs to give us the physical memory range to be mapped;
2831  * we'll figure out the rest from the vma information.
2832  *
2833  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set
2834  * up write-combining or similar.
2835  *
2836  * Return: %0 on success, negative error code otherwise.
2837  */
2838 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2839 {
2840 	unsigned long vm_len, pfn, pages;
2841 
2842 	/* Check that the physical memory area passed in looks valid */
2843 	if (start + len < start)
2844 		return -EINVAL;
2845 	/*
2846 	 * You *really* shouldn't map things that aren't page-aligned,
2847 	 * but we've historically allowed it because IO memory might
2848 	 * just have smaller alignment.
2849 	 */
2850 	len += start & ~PAGE_MASK;
2851 	pfn = start >> PAGE_SHIFT;
2852 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2853 	if (pfn + pages < pfn)
2854 		return -EINVAL;
2855 
2856 	/* We start the mapping 'vm_pgoff' pages into the area */
2857 	if (vma->vm_pgoff > pages)
2858 		return -EINVAL;
2859 	pfn += vma->vm_pgoff;
2860 	pages -= vma->vm_pgoff;
2861 
2862 	/* Can we fit all of the mapping? */
2863 	vm_len = vma->vm_end - vma->vm_start;
2864 	if (vm_len >> PAGE_SHIFT > pages)
2865 		return -EINVAL;
2866 
2867 	/* Ok, let it rip */
2868 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2869 }
2870 EXPORT_SYMBOL(vm_iomap_memory);
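
/*
 * Hypothetical usage sketch (not part of this file): the same mapping
 * as the remap_pfn_range() example above, but letting vm_iomap_memory()
 * derive the pfn and validate offset and length against the vma. Base
 * and length are made-up values.
 */
static int __maybe_unused example_mmap_iomem(struct file *file,
					     struct vm_area_struct *vma)
{
	const phys_addr_t example_phys = 0xfd000000;	/* illustrative */
	const unsigned long example_len = 1UL << 20;	/* illustrative */

	return vm_iomap_memory(vma, example_phys, example_len);
}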
2871 
2872 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2873 				     unsigned long addr, unsigned long end,
2874 				     pte_fn_t fn, void *data, bool create,
2875 				     pgtbl_mod_mask *mask)
2876 {
2877 	pte_t *pte, *mapped_pte;
2878 	int err = 0;
2879 	spinlock_t *ptl;
2880 
2881 	if (create) {
2882 		mapped_pte = pte = (mm == &init_mm) ?
2883 			pte_alloc_kernel_track(pmd, addr, mask) :
2884 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2885 		if (!pte)
2886 			return -ENOMEM;
2887 	} else {
2888 		mapped_pte = pte = (mm == &init_mm) ?
2889 			pte_offset_kernel(pmd, addr) :
2890 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2891 		if (!pte)
2892 			return -EINVAL;
2893 	}
2894 
2895 	arch_enter_lazy_mmu_mode();
2896 
2897 	if (fn) {
2898 		do {
2899 			if (create || !pte_none(ptep_get(pte))) {
2900 				err = fn(pte++, addr, data);
2901 				if (err)
2902 					break;
2903 			}
2904 		} while (addr += PAGE_SIZE, addr != end);
2905 	}
2906 	*mask |= PGTBL_PTE_MODIFIED;
2907 
2908 	arch_leave_lazy_mmu_mode();
2909 
2910 	if (mm != &init_mm)
2911 		pte_unmap_unlock(mapped_pte, ptl);
2912 	return err;
2913 }
2914 
2915 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2916 				     unsigned long addr, unsigned long end,
2917 				     pte_fn_t fn, void *data, bool create,
2918 				     pgtbl_mod_mask *mask)
2919 {
2920 	pmd_t *pmd;
2921 	unsigned long next;
2922 	int err = 0;
2923 
2924 	BUG_ON(pud_leaf(*pud));
2925 
2926 	if (create) {
2927 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2928 		if (!pmd)
2929 			return -ENOMEM;
2930 	} else {
2931 		pmd = pmd_offset(pud, addr);
2932 	}
2933 	do {
2934 		next = pmd_addr_end(addr, end);
2935 		if (pmd_none(*pmd) && !create)
2936 			continue;
2937 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2938 			return -EINVAL;
2939 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2940 			if (!create)
2941 				continue;
2942 			pmd_clear_bad(pmd);
2943 		}
2944 		err = apply_to_pte_range(mm, pmd, addr, next,
2945 					 fn, data, create, mask);
2946 		if (err)
2947 			break;
2948 	} while (pmd++, addr = next, addr != end);
2949 
2950 	return err;
2951 }
2952 
2953 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2954 				     unsigned long addr, unsigned long end,
2955 				     pte_fn_t fn, void *data, bool create,
2956 				     pgtbl_mod_mask *mask)
2957 {
2958 	pud_t *pud;
2959 	unsigned long next;
2960 	int err = 0;
2961 
2962 	if (create) {
2963 		pud = pud_alloc_track(mm, p4d, addr, mask);
2964 		if (!pud)
2965 			return -ENOMEM;
2966 	} else {
2967 		pud = pud_offset(p4d, addr);
2968 	}
2969 	do {
2970 		next = pud_addr_end(addr, end);
2971 		if (pud_none(*pud) && !create)
2972 			continue;
2973 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2974 			return -EINVAL;
2975 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2976 			if (!create)
2977 				continue;
2978 			pud_clear_bad(pud);
2979 		}
2980 		err = apply_to_pmd_range(mm, pud, addr, next,
2981 					 fn, data, create, mask);
2982 		if (err)
2983 			break;
2984 	} while (pud++, addr = next, addr != end);
2985 
2986 	return err;
2987 }
2988 
2989 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2990 				     unsigned long addr, unsigned long end,
2991 				     pte_fn_t fn, void *data, bool create,
2992 				     pgtbl_mod_mask *mask)
2993 {
2994 	p4d_t *p4d;
2995 	unsigned long next;
2996 	int err = 0;
2997 
2998 	if (create) {
2999 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
3000 		if (!p4d)
3001 			return -ENOMEM;
3002 	} else {
3003 		p4d = p4d_offset(pgd, addr);
3004 	}
3005 	do {
3006 		next = p4d_addr_end(addr, end);
3007 		if (p4d_none(*p4d) && !create)
3008 			continue;
3009 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
3010 			return -EINVAL;
3011 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
3012 			if (!create)
3013 				continue;
3014 			p4d_clear_bad(p4d);
3015 		}
3016 		err = apply_to_pud_range(mm, p4d, addr, next,
3017 					 fn, data, create, mask);
3018 		if (err)
3019 			break;
3020 	} while (p4d++, addr = next, addr != end);
3021 
3022 	return err;
3023 }
3024 
3025 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3026 				 unsigned long size, pte_fn_t fn,
3027 				 void *data, bool create)
3028 {
3029 	pgd_t *pgd;
3030 	unsigned long start = addr, next;
3031 	unsigned long end = addr + size;
3032 	pgtbl_mod_mask mask = 0;
3033 	int err = 0;
3034 
3035 	if (WARN_ON(addr >= end))
3036 		return -EINVAL;
3037 
3038 	pgd = pgd_offset(mm, addr);
3039 	do {
3040 		next = pgd_addr_end(addr, end);
3041 		if (pgd_none(*pgd) && !create)
3042 			continue;
3043 		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
3044 			return -EINVAL;
3045 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
3046 			if (!create)
3047 				continue;
3048 			pgd_clear_bad(pgd);
3049 		}
3050 		err = apply_to_p4d_range(mm, pgd, addr, next,
3051 					 fn, data, create, &mask);
3052 		if (err)
3053 			break;
3054 	} while (pgd++, addr = next, addr != end);
3055 
3056 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
3057 		arch_sync_kernel_mappings(start, start + size);
3058 
3059 	return err;
3060 }
3061 
3062 /*
3063  * Scan a region of virtual memory, filling in page tables as necessary
3064  * and calling a provided function on each leaf page table.
3065  */
3066 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3067 			unsigned long size, pte_fn_t fn, void *data)
3068 {
3069 	return __apply_to_page_range(mm, addr, size, fn, data, true);
3070 }
3071 EXPORT_SYMBOL_GPL(apply_to_page_range);
3072 
3073 /*
3074  * Scan a region of virtual memory, calling a provided function on
3075  * each leaf page table where it exists.
3076  *
3077  * Unlike apply_to_page_range, this does _not_ fill in page tables
3078  * where they are absent.
3079  */
3080 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
3081 				 unsigned long size, pte_fn_t fn, void *data)
3082 {
3083 	return __apply_to_page_range(mm, addr, size, fn, data, false);
3084 }
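
/*
 * Hypothetical usage sketch (not part of this file): a pte_fn_t
 * callback that write-protects every present pte in a kernel range,
 * paired with apply_to_existing_page_range() so absent page tables are
 * not populated. Illustrative only; a real user must also flush the
 * TLB for the range afterwards.
 */
static int __maybe_unused example_wrprotect_pte(pte_t *ptep,
						unsigned long addr, void *data)
{
	set_pte_at(&init_mm, addr, ptep, pte_wrprotect(ptep_get(ptep)));
	return 0;
}

static int __maybe_unused example_wrprotect_range(unsigned long addr,
						  unsigned long size)
{
	return apply_to_existing_page_range(&init_mm, addr, size,
					    example_wrprotect_pte, NULL);
}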
3085 
3086 /*
3087  * handle_pte_fault chooses the page fault handler according to an entry which was
3088  * read non-atomically.  Before making any commitment, on those architectures
3089  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3090  * parts, do_swap_page must check under lock before unmapping the pte and
3091  * proceeding (but do_wp_page is only called after already making such a check;
3092  * and do_anonymous_page can safely check later on).
3093  */
3094 static inline int pte_unmap_same(struct vm_fault *vmf)
3095 {
3096 	int same = 1;
3097 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
3098 	if (sizeof(pte_t) > sizeof(unsigned long)) {
3099 		spin_lock(vmf->ptl);
3100 		same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
3101 		spin_unlock(vmf->ptl);
3102 	}
3103 #endif
3104 	pte_unmap(vmf->pte);
3105 	vmf->pte = NULL;
3106 	return same;
3107 }
3108 
3109 /*
3110  * Return:
3111  *	0:		copy succeeded
3112  *	-EHWPOISON:	copy failed due to hwpoison in source page
3113  *	-EAGAIN:	copy failed (some other reason)
3114  */
3115 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
3116 				      struct vm_fault *vmf)
3117 {
3118 	int ret;
3119 	void *kaddr;
3120 	void __user *uaddr;
3121 	struct vm_area_struct *vma = vmf->vma;
3122 	struct mm_struct *mm = vma->vm_mm;
3123 	unsigned long addr = vmf->address;
3124 
3125 	if (likely(src)) {
3126 		if (copy_mc_user_highpage(dst, src, addr, vma))
3127 			return -EHWPOISON;
3128 		return 0;
3129 	}
3130 
3131 	/*
3132 	 * If the source page was a PFN mapping, we don't have
3133 	 * a "struct page" for it. We do a best-effort copy by
3134 	 * just copying from the original user address. If that
3135 	 * fails, we just zero-fill it. Live with it.
3136 	 */
3137 	kaddr = kmap_local_page(dst);
3138 	pagefault_disable();
3139 	uaddr = (void __user *)(addr & PAGE_MASK);
3140 
3141 	/*
3142 	 * On architectures with software "accessed" bits, we would
3143 	 * take a double page fault, so mark it accessed here.
3144 	 */
3145 	vmf->pte = NULL;
3146 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
3147 		pte_t entry;
3148 
3149 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3150 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3151 			/*
3152 			 * Another thread has already handled the fault;
3153 			 * just update the local TLB.
3154 			 */
3155 			if (vmf->pte)
3156 				update_mmu_tlb(vma, addr, vmf->pte);
3157 			ret = -EAGAIN;
3158 			goto pte_unlock;
3159 		}
3160 
3161 		entry = pte_mkyoung(vmf->orig_pte);
3162 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3163 			update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3164 	}
3165 
3166 	/*
3167 	 * This really shouldn't fail, because the page is there
3168 	 * in the page tables. But it might just be unreadable,
3169 	 * in which case we just give up and fill the result with
3170 	 * zeroes.
3171 	 */
3172 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3173 		if (vmf->pte)
3174 			goto warn;
3175 
3176 		/* Re-validate under PTL if the page is still mapped */
3177 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3178 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3179 			/* The PTE changed under us, update local tlb */
3180 			if (vmf->pte)
3181 				update_mmu_tlb(vma, addr, vmf->pte);
3182 			ret = -EAGAIN;
3183 			goto pte_unlock;
3184 		}
3185 
3186 		/*
3187 		 * The same page may have been mapped back since the last
3188 		 * copy attempt. Try to copy again under the PTL.
3189 		 */
3190 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3191 			/*
3192 			 * Warn in case some obscure use-case relies on
3193 			 * this behaviour.
3194 			 */
3195 warn:
3196 			WARN_ON_ONCE(1);
3197 			clear_page(kaddr);
3198 		}
3199 	}
3200 
3201 	ret = 0;
3202 
3203 pte_unlock:
3204 	if (vmf->pte)
3205 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3206 	pagefault_enable();
3207 	kunmap_local(kaddr);
3208 	flush_dcache_page(dst);
3209 
3210 	return ret;
3211 }
3212 
3213 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3214 {
3215 	struct file *vm_file = vma->vm_file;
3216 
3217 	if (vm_file)
3218 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3219 
3220 	/*
3221 	 * Special mappings (e.g. VDSO) do not have any file so fake
3222 	 * a default GFP_KERNEL for them.
3223 	 */
3224 	return GFP_KERNEL;
3225 }
3226 
3227 /*
3228  * Notify the address space that the page is about to become writable so that
3229  * it can prohibit this or wait for the page to get into an appropriate state.
3230  *
3231  * We do this without the lock held, so that it can sleep if it needs to.
3232  */
3233 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3234 {
3235 	vm_fault_t ret;
3236 	unsigned int old_flags = vmf->flags;
3237 
3238 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3239 
3240 	if (vmf->vma->vm_file &&
3241 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3242 		return VM_FAULT_SIGBUS;
3243 
3244 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3245 	/* Restore original flags so that caller is not surprised */
3246 	vmf->flags = old_flags;
3247 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3248 		return ret;
3249 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3250 		folio_lock(folio);
3251 		if (!folio->mapping) {
3252 			folio_unlock(folio);
3253 			return 0; /* retry */
3254 		}
3255 		ret |= VM_FAULT_LOCKED;
3256 	} else
3257 		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3258 	return ret;
3259 }
3260 
3261 /*
3262  * Handle dirtying of a page in shared file mapping on a write fault.
3263  *
3264  * The function expects the page to be locked and unlocks it.
3265  */
3266 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3267 {
3268 	struct vm_area_struct *vma = vmf->vma;
3269 	struct address_space *mapping;
3270 	struct folio *folio = page_folio(vmf->page);
3271 	bool dirtied;
3272 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3273 
3274 	dirtied = folio_mark_dirty(folio);
3275 	VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3276 	/*
3277 	 * Take a local copy of the address_space - folio.mapping may be zeroed
3278 	 * by truncate after folio_unlock().   The address_space itself remains
3279 	 * pinned by vma->vm_file's reference.  We rely on folio_unlock()'s
3280 	 * release semantics to prevent the compiler from undoing this copying.
3281 	 */
3282 	mapping = folio_raw_mapping(folio);
3283 	folio_unlock(folio);
3284 
3285 	if (!page_mkwrite)
3286 		file_update_time(vma->vm_file);
3287 
3288 	/*
3289 	 * Throttle page dirtying rate down to writeback speed.
3290 	 *
3291 	 * mapping may be NULL here because some device drivers do not
3292 	 * set page.mapping but still dirty their pages
3293 	 *
3294 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3295 	 * is pinning the mapping, as per above.
3296 	 */
3297 	if ((dirtied || page_mkwrite) && mapping) {
3298 		struct file *fpin;
3299 
3300 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3301 		balance_dirty_pages_ratelimited(mapping);
3302 		if (fpin) {
3303 			fput(fpin);
3304 			return VM_FAULT_COMPLETED;
3305 		}
3306 	}
3307 
3308 	return 0;
3309 }
3310 
3311 /*
3312  * Handle write page faults for pages that can be reused in the current vma
3313  *
3314  * This can happen either because the mapping has the VM_SHARED flag set,
3315  * or because we hold the last remaining reference to the page. In either
3316  * case, all we need to do here is to mark the page as writable and update
3317  * any related book-keeping.
3318  */
3319 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3320 	__releases(vmf->ptl)
3321 {
3322 	struct vm_area_struct *vma = vmf->vma;
3323 	pte_t entry;
3324 
3325 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3326 	VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
3327 
3328 	if (folio) {
3329 		VM_BUG_ON(folio_test_anon(folio) &&
3330 			  !PageAnonExclusive(vmf->page));
3331 		/*
3332 		 * Clear the folio's cpupid information as the existing
3333 		 * information potentially belongs to a now completely
3334 		 * unrelated process.
3335 		 */
3336 		folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3337 	}
3338 
3339 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3340 	entry = pte_mkyoung(vmf->orig_pte);
3341 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3342 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3343 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3344 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3345 	count_vm_event(PGREUSE);
3346 }
3347 
3348 /*
3349  * We could add a bitflag somewhere, but for now, we know that all
3350  * vm_ops that have a ->map_pages have been audited and don't need
3351  * the mmap_lock to be held.
3352  */
3353 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3354 {
3355 	struct vm_area_struct *vma = vmf->vma;
3356 
3357 	if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3358 		return 0;
3359 	vma_end_read(vma);
3360 	return VM_FAULT_RETRY;
3361 }
3362 
3363 /**
3364  * __vmf_anon_prepare - Prepare to handle an anonymous fault.
3365  * @vmf: The vm_fault descriptor passed from the fault handler.
3366  *
3367  * When preparing to insert an anonymous page into a VMA from a
3368  * fault handler, call this function rather than anon_vma_prepare().
3369  * If this vma does not already have an associated anon_vma and we are
3370  * only protected by the per-VMA lock, the caller must retry with the
3371  * mmap_lock held.  __anon_vma_prepare() will look at adjacent VMAs to
3372  * determine if this VMA can share its anon_vma, and that's not safe to
3373  * do with only the per-VMA lock held for this VMA.
3374  *
3375  * Return: 0 if fault handling can proceed.  Any other value should be
3376  * returned to the caller.
3377  */
3378 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
3379 {
3380 	struct vm_area_struct *vma = vmf->vma;
3381 	vm_fault_t ret = 0;
3382 
3383 	if (likely(vma->anon_vma))
3384 		return 0;
3385 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3386 		if (!mmap_read_trylock(vma->vm_mm))
3387 			return VM_FAULT_RETRY;
3388 	}
3389 	if (__anon_vma_prepare(vma))
3390 		ret = VM_FAULT_OOM;
3391 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3392 		mmap_read_unlock(vma->vm_mm);
3393 	return ret;
3394 }
3395 
3396 /*
3397  * Handle the case of a page which we actually need to copy to a new page,
3398  * either due to COW or unsharing.
3399  *
3400  * Called with mmap_lock locked and the old page referenced, but
3401  * without the ptl held.
3402  *
3403  * High level logic flow:
3404  *
3405  * - Allocate a page, copy the content of the old page to the new one.
3406  * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
3407  * - Take the PTL. If the pte changed, bail out and release the allocated page
3408  * - If the pte is still the way we remember it, update the page table and all
3409  *   relevant references. This includes dropping the reference the page-table
3410  *   held to the old page, as well as updating the rmap.
3411  * - In any case, unlock the PTL and drop the reference we took to the old page.
3412  */
3413 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3414 {
3415 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3416 	struct vm_area_struct *vma = vmf->vma;
3417 	struct mm_struct *mm = vma->vm_mm;
3418 	struct folio *old_folio = NULL;
3419 	struct folio *new_folio = NULL;
3420 	pte_t entry;
3421 	int page_copied = 0;
3422 	struct mmu_notifier_range range;
3423 	vm_fault_t ret;
3424 	bool pfn_is_zero;
3425 
3426 	delayacct_wpcopy_start();
3427 
3428 	if (vmf->page)
3429 		old_folio = page_folio(vmf->page);
3430 	ret = vmf_anon_prepare(vmf);
3431 	if (unlikely(ret))
3432 		goto out;
3433 
3434 	pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3435 	new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3436 	if (!new_folio)
3437 		goto oom;
3438 
3439 	if (!pfn_is_zero) {
3440 		int err;
3441 
3442 		err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3443 		if (err) {
3444 			/*
3445 			 * COW failed; if the fault was resolved by another
3446 			 * thread, that's fine. If not, userspace will
3447 			 * re-fault at the same address and we will handle
3448 			 * the fault from the second attempt.
3449 			 * The -EHWPOISON case will not be retried.
3450 			 */
3451 			folio_put(new_folio);
3452 			if (old_folio)
3453 				folio_put(old_folio);
3454 
3455 			delayacct_wpcopy_end();
3456 			return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3457 		}
3458 		kmsan_copy_page_meta(&new_folio->page, vmf->page);
3459 	}
3460 
3461 	__folio_mark_uptodate(new_folio);
3462 
3463 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3464 				vmf->address & PAGE_MASK,
3465 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3466 	mmu_notifier_invalidate_range_start(&range);
3467 
3468 	/*
3469 	 * Re-check the pte - we dropped the lock
3470 	 */
3471 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3472 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3473 		if (old_folio) {
3474 			if (!folio_test_anon(old_folio)) {
3475 				dec_mm_counter(mm, mm_counter_file(old_folio));
3476 				inc_mm_counter(mm, MM_ANONPAGES);
3477 			}
3478 		} else {
3479 			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3480 			inc_mm_counter(mm, MM_ANONPAGES);
3481 		}
3482 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3483 		entry = mk_pte(&new_folio->page, vma->vm_page_prot);
3484 		entry = pte_sw_mkyoung(entry);
3485 		if (unlikely(unshare)) {
3486 			if (pte_soft_dirty(vmf->orig_pte))
3487 				entry = pte_mksoft_dirty(entry);
3488 			if (pte_uffd_wp(vmf->orig_pte))
3489 				entry = pte_mkuffd_wp(entry);
3490 		} else {
3491 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3492 		}
3493 
3494 		/*
3495 		 * Clear the pte entry and flush it first, before updating the
3496 		 * pte with the new entry, to keep TLBs on different CPUs in
3497 		 * sync. This code used to set the new PTE then flush TLBs, but
3498 		 * that left a window where the new PTE could be loaded into
3499 		 * some TLBs while the old PTE remains in others.
3500 		 */
3501 		ptep_clear_flush(vma, vmf->address, vmf->pte);
3502 		folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
3503 		folio_add_lru_vma(new_folio, vma);
3504 		BUG_ON(unshare && pte_write(entry));
3505 		set_pte_at(mm, vmf->address, vmf->pte, entry);
3506 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3507 		if (old_folio) {
3508 			/*
3509 			 * Only after switching the pte to the new page may
3510 			 * we remove the mapcount here. Otherwise another
3511 			 * process may come and find the rmap count decremented
3512 			 * before the pte is switched to the new page, and
3513 			 * "reuse" the old page writing into it while our pte
3514 			 * here still points into it and can be read by other
3515 			 * threads.
3516 			 *
3517 			 * The critical issue is to order this
3518 			 * folio_remove_rmap_pte() with the ptep_clear_flush
3519 			 * above. Those stores are ordered by (if nothing else)
3520 			 * the barrier present in the atomic_add_negative
3521 			 * in folio_remove_rmap_pte().
3522 			 *
3523 			 * Then the TLB flush in ptep_clear_flush ensures that
3524 			 * no process can access the old page before the
3525 			 * decremented mapcount is visible. And the old page
3526 			 * cannot be reused until after the decremented
3527 			 * mapcount is visible. So transitively, TLBs to
3528 			 * old page will be flushed before it can be reused.
3529 			 */
3530 			folio_remove_rmap_pte(old_folio, vmf->page, vma);
3531 		}
3532 
3533 		/* Free the old page.. */
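		/*
		 * Reusing new_folio here makes the common folio_put() below
		 * drop the reference the page table held on the old folio;
		 * the new folio's reference is now owned by the page table.
		 */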
3534 		new_folio = old_folio;
3535 		page_copied = 1;
3536 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3537 	} else if (vmf->pte) {
3538 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3539 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3540 	}
3541 
3542 	mmu_notifier_invalidate_range_end(&range);
3543 
3544 	if (new_folio)
3545 		folio_put(new_folio);
3546 	if (old_folio) {
3547 		if (page_copied)
3548 			free_swap_cache(old_folio);
3549 		folio_put(old_folio);
3550 	}
3551 
3552 	delayacct_wpcopy_end();
3553 	return 0;
3554 oom:
3555 	ret = VM_FAULT_OOM;
3556 out:
3557 	if (old_folio)
3558 		folio_put(old_folio);
3559 
3560 	delayacct_wpcopy_end();
3561 	return ret;
3562 }
3563 
3564 /**
3565  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3566  *			  writeable once the page is prepared
3567  *
3568  * @vmf: structure describing the fault
3569  * @folio: the folio of vmf->page
3570  *
3571  * This function handles all that is needed to finish a write page fault in a
3572  * shared mapping due to PTE being read-only once the mapped page is prepared.
3573  * It handles locking of PTE and modifying it.
3574  *
3575  * The function expects the page to be locked or other protection against
3576  * concurrent faults / writeback (such as DAX radix tree locks).
3577  *
3578  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3579  * we acquired PTE lock.
3580  */
3581 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
3582 {
3583 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3584 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3585 				       &vmf->ptl);
3586 	if (!vmf->pte)
3587 		return VM_FAULT_NOPAGE;
3588 	/*
3589 	 * We might have raced with another page fault while we released the
3590 	 * pte_offset_map_lock.
3591 	 */
3592 	if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3593 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3594 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3595 		return VM_FAULT_NOPAGE;
3596 	}
3597 	wp_page_reuse(vmf, folio);
3598 	return 0;
3599 }
3600 
3601 /*
3602  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3603  * mapping
3604  */
3605 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3606 {
3607 	struct vm_area_struct *vma = vmf->vma;
3608 
3609 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3610 		vm_fault_t ret;
3611 
3612 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3613 		ret = vmf_can_call_fault(vmf);
3614 		if (ret)
3615 			return ret;
3616 
3617 		vmf->flags |= FAULT_FLAG_MKWRITE;
3618 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3619 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3620 			return ret;
3621 		return finish_mkwrite_fault(vmf, NULL);
3622 	}
3623 	wp_page_reuse(vmf, NULL);
3624 	return 0;
3625 }
3626 
3627 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3628 	__releases(vmf->ptl)
3629 {
3630 	struct vm_area_struct *vma = vmf->vma;
3631 	vm_fault_t ret = 0;
3632 
3633 	folio_get(folio);
3634 
3635 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3636 		vm_fault_t tmp;
3637 
3638 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3639 		tmp = vmf_can_call_fault(vmf);
3640 		if (tmp) {
3641 			folio_put(folio);
3642 			return tmp;
3643 		}
3644 
3645 		tmp = do_page_mkwrite(vmf, folio);
3646 		if (unlikely(!tmp || (tmp &
3647 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3648 			folio_put(folio);
3649 			return tmp;
3650 		}
3651 		tmp = finish_mkwrite_fault(vmf, folio);
3652 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3653 			folio_unlock(folio);
3654 			folio_put(folio);
3655 			return tmp;
3656 		}
3657 	} else {
3658 		wp_page_reuse(vmf, folio);
3659 		folio_lock(folio);
3660 	}
3661 	ret |= fault_dirty_shared_page(vmf);
3662 	folio_put(folio);
3663 
3664 	return ret;
3665 }
3666 
3667 static bool wp_can_reuse_anon_folio(struct folio *folio,
3668 				    struct vm_area_struct *vma)
3669 {
3670 	/*
3671 	 * We could currently only reuse a subpage of a large folio if no
3672 	 * other subpages of the large folio are still mapped. However,
3673 	 * let's just consistently not reuse subpages even if we could
3674 	 * reuse in that scenario, and give back a large folio a bit
3675 	 * sooner.
3676 	 */
3677 	if (folio_test_large(folio))
3678 		return false;
3679 
3680 	/*
3681 	 * We have to verify under folio lock: these early checks are
3682 	 * just an optimization to avoid locking the folio and freeing
3683 	 * the swapcache if there is little hope that we can reuse.
3684 	 *
3685 	 * KSM doesn't necessarily raise the folio refcount.
3686 	 */
3687 	if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3688 		return false;
3689 	if (!folio_test_lru(folio))
3690 		/*
3691 		 * We cannot easily detect+handle references from
3692 		 * remote LRU caches or references to LRU folios.
3693 		 */
3694 		lru_add_drain();
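	/* One reference from our mapping, plus one if still in the swapcache. */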
3695 	if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3696 		return false;
3697 	if (!folio_trylock(folio))
3698 		return false;
3699 	if (folio_test_swapcache(folio))
3700 		folio_free_swap(folio);
3701 	if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3702 		folio_unlock(folio);
3703 		return false;
3704 	}
3705 	/*
3706 	 * Ok, we've got the only folio reference from our mapping
3707 	 * and the folio is locked, it's dark out, and we're wearing
3708 	 * sunglasses. Hit it.
3709 	 */
3710 	folio_move_anon_rmap(folio, vma);
3711 	folio_unlock(folio);
3712 	return true;
3713 }
3714 
3715 /*
3716  * This routine handles present pages, when
3717  * * users try to write to a shared page (FAULT_FLAG_WRITE)
3718  * * GUP wants to take a R/O pin on a possibly shared anonymous page
3719  *   (FAULT_FLAG_UNSHARE)
3720  *
3721  * It is done by copying the page to a new address and decrementing the
3722  * shared-page counter for the old page.
3723  *
3724  * Note that this routine assumes that the protection checks have been
3725  * done by the caller (the low-level page fault routine in most cases).
3726  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3727  * done any necessary COW.
3728  *
3729  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3730  * though the page will change only once the write actually happens. This
3731  * avoids a few races, and potentially makes it more efficient.
3732  *
3733  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3734  * but allow concurrent faults), with pte both mapped and locked.
3735  * We return with mmap_lock still held, but pte unmapped and unlocked.
3736  */
3737 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3738 	__releases(vmf->ptl)
3739 {
3740 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3741 	struct vm_area_struct *vma = vmf->vma;
3742 	struct folio *folio = NULL;
3743 	pte_t pte;
3744 
3745 	if (likely(!unshare)) {
3746 		if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3747 			if (!userfaultfd_wp_async(vma)) {
3748 				pte_unmap_unlock(vmf->pte, vmf->ptl);
3749 				return handle_userfault(vmf, VM_UFFD_WP);
3750 			}
3751 
3752 			/*
3753 			 * Nothing needed (cache flush, TLB invalidations,
3754 			 * etc.) because we're only removing the uffd-wp bit,
3755 			 * which is completely invisible to the user.
3756 			 */
3757 			pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
3758 
3759 			set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3760 			/*
3761 			 * Update this to be prepared for the CoW handling
3762 			 * that follows.
3763 			 */
3764 			vmf->orig_pte = pte;
3765 		}
3766 
3767 		/*
3768 		 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3769 		 * is flushed in this case before copying.
3770 		 */
3771 		if (unlikely(userfaultfd_wp(vmf->vma) &&
3772 			     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3773 			flush_tlb_page(vmf->vma, vmf->address);
3774 	}
3775 
3776 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3777 
3778 	if (vmf->page)
3779 		folio = page_folio(vmf->page);
3780 
3781 	/*
3782 	 * Shared mapping: we are guaranteed to have VM_WRITE and
3783 	 * FAULT_FLAG_WRITE set at this point.
3784 	 */
3785 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3786 		/*
3787 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3788 		 * VM_PFNMAP VMA.
3789 		 *
3790 		 * We should not cow pages in a shared writeable mapping.
3791 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3792 		 */
3793 		if (!vmf->page)
3794 			return wp_pfn_shared(vmf);
3795 		return wp_page_shared(vmf, folio);
3796 	}
3797 
3798 	/*
3799 	 * Private mapping: create an exclusive anonymous page copy if reuse
3800 	 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
3801 	 *
3802 	 * If we encounter a page that is marked exclusive, we must reuse
3803 	 * the page without further checks.
3804 	 */
3805 	if (folio && folio_test_anon(folio) &&
3806 	    (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
3807 		if (!PageAnonExclusive(vmf->page))
3808 			SetPageAnonExclusive(vmf->page);
3809 		if (unlikely(unshare)) {
3810 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3811 			return 0;
3812 		}
3813 		wp_page_reuse(vmf, folio);
3814 		return 0;
3815 	}
3816 	/*
3817 	 * Ok, we need to copy. Oh, well..
3818 	 */
3819 	if (folio)
3820 		folio_get(folio);
3821 
3822 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3823 #ifdef CONFIG_KSM
3824 	if (folio && folio_test_ksm(folio))
3825 		count_vm_event(COW_KSM);
3826 #endif
3827 	return wp_page_copy(vmf);
3828 }
3829 
3830 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3831 		unsigned long start_addr, unsigned long end_addr,
3832 		struct zap_details *details)
3833 {
3834 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3835 }
3836 
3837 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3838 					    pgoff_t first_index,
3839 					    pgoff_t last_index,
3840 					    struct zap_details *details)
3841 {
3842 	struct vm_area_struct *vma;
3843 	pgoff_t vba, vea, zba, zea;
3844 
3845 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
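		/* Clip the zap range [first_index, last_index] to this VMA. */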
3846 		vba = vma->vm_pgoff;
3847 		vea = vba + vma_pages(vma) - 1;
3848 		zba = max(first_index, vba);
3849 		zea = min(last_index, vea);
3850 
3851 		unmap_mapping_range_vma(vma,
3852 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3853 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3854 				details);
3855 	}
3856 }
3857 
3858 /**
3859  * unmap_mapping_folio() - Unmap single folio from processes.
3860  * @folio: The locked folio to be unmapped.
3861  *
3862  * Unmap this folio from any userspace process which still has it mmaped.
3863  * Typically, for efficiency, the range of nearby pages has already been
3864  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3865  * truncation or invalidation holds the lock on a folio, it may find that
3866  * the page has been remapped again: and then uses unmap_mapping_folio()
3867  * to unmap it finally.
3868  */
3869 void unmap_mapping_folio(struct folio *folio)
3870 {
3871 	struct address_space *mapping = folio->mapping;
3872 	struct zap_details details = { };
3873 	pgoff_t	first_index;
3874 	pgoff_t	last_index;
3875 
3876 	VM_BUG_ON(!folio_test_locked(folio));
3877 
3878 	first_index = folio->index;
3879 	last_index = folio_next_index(folio) - 1;
3880 
3881 	details.even_cows = false;
3882 	details.single_folio = folio;
3883 	details.zap_flags = ZAP_FLAG_DROP_MARKER;
3884 
3885 	i_mmap_lock_read(mapping);
3886 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3887 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3888 					 last_index, &details);
3889 	i_mmap_unlock_read(mapping);
3890 }
3891 
3892 /**
3893  * unmap_mapping_pages() - Unmap pages from processes.
3894  * @mapping: The address space containing pages to be unmapped.
3895  * @start: Index of first page to be unmapped.
3896  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3897  * @even_cows: Whether to unmap even private COWed pages.
3898  *
3899  * Unmap the pages in this address space from any userspace process which
3900  * has them mmaped.  Generally, you want to remove COWed pages as well when
3901  * a file is being truncated, but not when invalidating pages from the page
3902  * cache.
3903  */
3904 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3905 		pgoff_t nr, bool even_cows)
3906 {
3907 	struct zap_details details = { };
3908 	pgoff_t	first_index = start;
3909 	pgoff_t	last_index = start + nr - 1;
3910 
3911 	details.even_cows = even_cows;
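	/* nr == 0, or overflow of start + nr, means unmap to end of file. */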
3912 	if (last_index < first_index)
3913 		last_index = ULONG_MAX;
3914 
3915 	i_mmap_lock_read(mapping);
3916 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3917 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3918 					 last_index, &details);
3919 	i_mmap_unlock_read(mapping);
3920 }
3921 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
3922 
3923 /**
3924  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3925  * address_space corresponding to the specified byte range in the underlying
3926  * file.
3927  *
3928  * @mapping: the address space containing mmaps to be unmapped.
3929  * @holebegin: byte in first page to unmap, relative to the start of
3930  * the underlying file.  This will be rounded down to a PAGE_SIZE
3931  * boundary.  Note that this is different from truncate_pagecache(), which
3932  * must keep the partial page.  In contrast, we must get rid of
3933  * partial pages.
3934  * @holelen: size of prospective hole in bytes.  This will be rounded
3935  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3936  * end of the file.
3937  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3938  * but 0 when invalidating pagecache, don't throw away private data.
3939  */
3940 void unmap_mapping_range(struct address_space *mapping,
3941 		loff_t const holebegin, loff_t const holelen, int even_cows)
3942 {
3943 	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3944 	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
3945 
3946 	/* Check for overflow. */
3947 	if (sizeof(holelen) > sizeof(hlen)) {
3948 		long long holeend =
3949 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3950 		if (holeend & ~(long long)ULONG_MAX)
3951 			hlen = ULONG_MAX - hba + 1;
3952 	}
3953 
3954 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3955 }
3956 EXPORT_SYMBOL(unmap_mapping_range);
3957 
3958 /*
3959  * Restore a potential device exclusive pte to a working pte entry
3960  */
3961 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3962 {
3963 	struct folio *folio = page_folio(vmf->page);
3964 	struct vm_area_struct *vma = vmf->vma;
3965 	struct mmu_notifier_range range;
3966 	vm_fault_t ret;
3967 
3968 	/*
3969 	 * We need a reference to lock the folio because we don't hold
3970 	 * the PTL so a racing thread can remove the device-exclusive
3971 	 * entry and unmap it. If the folio is free, the entry must
3972 	 * have been removed already. If it happens to have already
3973 	 * been re-allocated after being freed, all we do is lock and
3974 	 * unlock it.
3975 	 */
3976 	if (!folio_try_get(folio))
3977 		return 0;
3978 
3979 	ret = folio_lock_or_retry(folio, vmf);
3980 	if (ret) {
3981 		folio_put(folio);
3982 		return ret;
3983 	}
3984 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3985 				vma->vm_mm, vmf->address & PAGE_MASK,
3986 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3987 	mmu_notifier_invalidate_range_start(&range);
3988 
3989 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3990 				&vmf->ptl);
3991 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3992 		restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
3993 
3994 	if (vmf->pte)
3995 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3996 	folio_unlock(folio);
3997 	folio_put(folio);
3998 
3999 	mmu_notifier_invalidate_range_end(&range);
4000 	return 0;
4001 }
4002 
4003 static inline bool should_try_to_free_swap(struct folio *folio,
4004 					   struct vm_area_struct *vma,
4005 					   unsigned int fault_flags)
4006 {
4007 	if (!folio_test_swapcache(folio))
4008 		return false;
4009 	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
4010 	    folio_test_mlocked(folio))
4011 		return true;
4012 	/*
4013 	 * If we want to map a page that's in the swapcache writable, we
4014 	 * have to detect via the refcount if we're really the exclusive
4015 	 * user. Try freeing the swapcache to get rid of the swapcache
4016 	 * reference only in case it's likely that we'll be the exclusive user.
4017 	 */
4018 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
4019 		folio_ref_count(folio) == (1 + folio_nr_pages(folio));
4020 }
4021 
4022 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
4023 {
4024 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4025 				       vmf->address, &vmf->ptl);
4026 	if (!vmf->pte)
4027 		return 0;
4028 	/*
4029 	 * Be careful that we only recover a special uffd-wp pte into a
4030 	 * none pte.  Otherwise it means the pte could have changed, so retry.
4031 	 *
4032 	 * This should also cover the case where e.g. the pte changed
4033 	 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
4034 	 * So an is_pte_marker() check is not enough to safely drop the pte.
4035 	 */
4036 	if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
4037 		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
4038 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4039 	return 0;
4040 }
4041 
4042 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
4043 {
4044 	if (vma_is_anonymous(vmf->vma))
4045 		return do_anonymous_page(vmf);
4046 	else
4047 		return do_fault(vmf);
4048 }
4049 
4050 /*
4051  * This is actually a page-missing access, but with uffd-wp special pte
4052  * installed.  It means this pte was wr-protected before being unmapped.
4053  */
4054 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
4055 {
4056 	/*
4057 	 * Just in case there are leftover special ptes even after the region
4058 	 * got unregistered - we can simply clear them.
4059 	 */
4060 	if (unlikely(!userfaultfd_wp(vmf->vma)))
4061 		return pte_marker_clear(vmf);
4062 
4063 	return do_pte_missing(vmf);
4064 }
4065 
4066 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
4067 {
4068 	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
4069 	unsigned long marker = pte_marker_get(entry);
4070 
4071 	/*
4072 	 * PTE markers should never be empty.  If anything weird happened,
4073 	 * the best thing to do is to kill the process along with its mm.
4074 	 */
4075 	if (WARN_ON_ONCE(!marker))
4076 		return VM_FAULT_SIGBUS;
4077 
4078 	/* Higher priority than uffd-wp when data corrupted */
4079 	if (marker & PTE_MARKER_POISONED)
4080 		return VM_FAULT_HWPOISON;
4081 
4082 	/* Hitting a guard page is always a fatal condition. */
4083 	if (marker & PTE_MARKER_GUARD)
4084 		return VM_FAULT_SIGSEGV;
4085 
4086 	if (pte_marker_entry_uffd_wp(entry))
4087 		return pte_marker_handle_uffd_wp(vmf);
4088 
4089 	/* This is an unknown pte marker */
4090 	return VM_FAULT_SIGBUS;
4091 }
4092 
4093 static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
4094 {
4095 	struct vm_area_struct *vma = vmf->vma;
4096 	struct folio *folio;
4097 	swp_entry_t entry;
4098 
4099 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
4100 	if (!folio)
4101 		return NULL;
4102 
4103 	entry = pte_to_swp_entry(vmf->orig_pte);
4104 	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4105 					   GFP_KERNEL, entry)) {
4106 		folio_put(folio);
4107 		return NULL;
4108 	}
4109 
4110 	return folio;
4111 }
4112 
4113 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4114 static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
4115 {
4116 	struct swap_info_struct *si = swp_swap_info(entry);
4117 	pgoff_t offset = swp_offset(entry);
4118 	int i;
4119 
4120 	/*
4121 	 * We get here while allocating a large folio and reading it in with
4122 	 * swap_read_folio(), i.e. the faulting pte has no swapcache. We must
4123 	 * ensure none of the other PTEs has a swapcache entry either, or we
4124 	 * might read from the swap device while the content is in swapcache.
4125 	 */
4126 	for (i = 0; i < max_nr; i++) {
4127 		if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
4128 			return i;
4129 	}
4130 
4131 	return i;
4132 }
4133 
4134 /*
4135  * Check if the PTEs within a range are contiguous swap entries
4136  * and have consistent swapcache, zeromap.
4137  */
4138 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
4139 {
4140 	unsigned long addr;
4141 	swp_entry_t entry;
4142 	int idx;
4143 	pte_t pte;
4144 
4145 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4146 	idx = (vmf->address - addr) / PAGE_SIZE;
4147 	pte = ptep_get(ptep);
4148 
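	/*
	 * The first PTE of the range must hold the swap entry that sits
	 * exactly idx slots before the faulting entry.
	 */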
4149 	if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
4150 		return false;
4151 	entry = pte_to_swp_entry(pte);
4152 	if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
4153 		return false;
4154 
4155 	/*
4156 	 * swap_read_folio() can't handle a large folio backed hybridly by
4157 	 * different backends, and those are likely corner cases. Similar
4158 	 * handling might be added once zswap supports large folios.
4159 	 */
4160 	if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
4161 		return false;
4162 	if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
4163 		return false;
4164 
4165 	return true;
4166 }
4167 
4168 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
4169 						     unsigned long addr,
4170 						     unsigned long orders)
4171 {
4172 	int order, nr;
4173 
4174 	order = highest_order(orders);
4175 
4176 	/*
4177 	 * To swap in a THP with nr pages, we require that its first swap_offset
4178 	 * is aligned with that number, as it was when the THP was swapped out.
4179 	 * This helps filter out most invalid entries.
4180 	 */
4181 	while (orders) {
4182 		nr = 1 << order;
4183 		if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
4184 			break;
4185 		order = next_order(&orders, order);
4186 	}
4187 
4188 	return orders;
4189 }
4190 
4191 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4192 {
4193 	struct vm_area_struct *vma = vmf->vma;
4194 	unsigned long orders;
4195 	struct folio *folio;
4196 	unsigned long addr;
4197 	swp_entry_t entry;
4198 	spinlock_t *ptl;
4199 	pte_t *pte;
4200 	gfp_t gfp;
4201 	int order;
4202 
4203 	/*
4204 	 * If uffd is active for the vma we need per-page fault fidelity to
4205 	 * maintain the uffd semantics.
4206 	 */
4207 	if (unlikely(userfaultfd_armed(vma)))
4208 		goto fallback;
4209 
4210 	/*
4211 	 * A large swapped out folio could be partially or fully in zswap. We
4212 	 * lack handling for such cases, so fall back to swapping in an
4213 	 * order-0 folio.
4214 	 */
4215 	if (!zswap_never_enabled())
4216 		goto fallback;
4217 
4218 	entry = pte_to_swp_entry(vmf->orig_pte);
4219 	/*
4220 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4221 	 * and suitable for swapping THP.
4222 	 */
4223 	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4224 			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4225 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4226 	orders = thp_swap_suitable_orders(swp_offset(entry),
4227 					  vmf->address, orders);
4228 
4229 	if (!orders)
4230 		goto fallback;
4231 
4232 	pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4233 				  vmf->address & PMD_MASK, &ptl);
4234 	if (unlikely(!pte))
4235 		goto fallback;
4236 
4237 	/*
4238 	 * For do_swap_page, find the highest order where the aligned range is
4239 	 * completely swap entries with contiguous swap offsets.
4240 	 */
4241 	order = highest_order(orders);
4242 	while (orders) {
4243 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4244 		if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
4245 			break;
4246 		order = next_order(&orders, order);
4247 	}
4248 
4249 	pte_unmap_unlock(pte, ptl);
4250 
4251 	/* Try allocating the highest of the remaining orders. */
4252 	gfp = vma_thp_gfp_mask(vma);
4253 	while (orders) {
4254 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4255 		folio = vma_alloc_folio(gfp, order, vma, addr);
4256 		if (folio) {
4257 			if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4258 							    gfp, entry))
4259 				return folio;
4260 			count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
4261 			folio_put(folio);
4262 		}
4263 		count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
4264 		order = next_order(&orders, order);
4265 	}
4266 
4267 fallback:
4268 	return __alloc_swap_folio(vmf);
4269 }
4270 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4271 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4272 {
4273 	return __alloc_swap_folio(vmf);
4274 }
4275 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4276 
4277 static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
4278 
4279 /*
4280  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4281  * but allow concurrent faults), and pte mapped but not yet locked.
4282  * We return with pte unmapped and unlocked.
4283  *
4284  * We return with the mmap_lock locked or unlocked in the same cases
4285  * as does filemap_fault().
4286  */
4287 vm_fault_t do_swap_page(struct vm_fault *vmf)
4288 {
4289 	struct vm_area_struct *vma = vmf->vma;
4290 	struct folio *swapcache, *folio = NULL;
4291 	DECLARE_WAITQUEUE(wait, current);
4292 	struct page *page;
4293 	struct swap_info_struct *si = NULL;
4294 	rmap_t rmap_flags = RMAP_NONE;
4295 	bool need_clear_cache = false;
4296 	bool exclusive = false;
4297 	swp_entry_t entry;
4298 	pte_t pte;
4299 	vm_fault_t ret = 0;
4300 	void *shadow = NULL;
4301 	int nr_pages;
4302 	unsigned long page_idx;
4303 	unsigned long address;
4304 	pte_t *ptep;
4305 
4306 	if (!pte_unmap_same(vmf))
4307 		goto out;
4308 
4309 	entry = pte_to_swp_entry(vmf->orig_pte);
4310 	if (unlikely(non_swap_entry(entry))) {
4311 		if (is_migration_entry(entry)) {
4312 			migration_entry_wait(vma->vm_mm, vmf->pmd,
4313 					     vmf->address);
4314 		} else if (is_device_exclusive_entry(entry)) {
4315 			vmf->page = pfn_swap_entry_to_page(entry);
4316 			ret = remove_device_exclusive_entry(vmf);
4317 		} else if (is_device_private_entry(entry)) {
4318 			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4319 				/*
4320 				 * migrate_to_ram is not yet ready to operate
4321 				 * under VMA lock.
4322 				 */
4323 				vma_end_read(vma);
4324 				ret = VM_FAULT_RETRY;
4325 				goto out;
4326 			}
4327 
4328 			vmf->page = pfn_swap_entry_to_page(entry);
4329 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4330 					vmf->address, &vmf->ptl);
4331 			if (unlikely(!vmf->pte ||
4332 				     !pte_same(ptep_get(vmf->pte),
4333 							vmf->orig_pte)))
4334 				goto unlock;
4335 
4336 			/*
4337 			 * Get a page reference while we know the page can't be
4338 			 * freed.
4339 			 */
4340 			get_page(vmf->page);
4341 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4342 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
4343 			put_page(vmf->page);
4344 		} else if (is_hwpoison_entry(entry)) {
4345 			ret = VM_FAULT_HWPOISON;
4346 		} else if (is_pte_marker_entry(entry)) {
4347 			ret = handle_pte_marker(vmf);
4348 		} else {
4349 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4350 			ret = VM_FAULT_SIGBUS;
4351 		}
4352 		goto out;
4353 	}
4354 
4355 	/* Prevent swapoff from happening to us. */
4356 	si = get_swap_device(entry);
4357 	if (unlikely(!si))
4358 		goto out;
4359 
4360 	folio = swap_cache_get_folio(entry, vma, vmf->address);
4361 	if (folio)
4362 		page = folio_file_page(folio, swp_offset(entry));
4363 	swapcache = folio;
4364 
4365 	if (!folio) {
4366 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
4367 		    __swap_count(entry) == 1) {
4368 			/* skip swapcache */
4369 			folio = alloc_swap_folio(vmf);
4370 			if (folio) {
4371 				__folio_set_locked(folio);
4372 				__folio_set_swapbacked(folio);
4373 
4374 				nr_pages = folio_nr_pages(folio);
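				/*
				 * For a large folio, use the swap entry of
				 * its first page for everything below.
				 */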
4375 				if (folio_test_large(folio))
4376 					entry.val = ALIGN_DOWN(entry.val, nr_pages);
4377 				/*
4378 				 * Prevent parallel swapin from proceeding with
4379 				 * the cache flag. Otherwise, another thread
4380 				 * may finish swapin first, free the entry, and
4381 				 * swapout reusing the same entry. It's
4382 				 * undetectable as pte_same() returns true due
4383 				 * to entry reuse.
4384 				 */
4385 				if (swapcache_prepare(entry, nr_pages)) {
4386 					/*
4387 					 * Relax a bit to prevent rapid
4388 					 * repeated page faults.
4389 					 */
4390 					add_wait_queue(&swapcache_wq, &wait);
4391 					schedule_timeout_uninterruptible(1);
4392 					remove_wait_queue(&swapcache_wq, &wait);
4393 					goto out_page;
4394 				}
4395 				need_clear_cache = true;
4396 
4397 				mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
4398 
4399 				shadow = get_shadow_from_swap_cache(entry);
4400 				if (shadow)
4401 					workingset_refault(folio, shadow);
4402 
4403 				folio_add_lru(folio);
4404 
4405 				/* To provide entry to swap_read_folio() */
4406 				folio->swap = entry;
4407 				swap_read_folio(folio, NULL);
4408 				folio->private = NULL;
4409 			}
4410 		} else {
4411 			folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
4412 						vmf);
4413 			swapcache = folio;
4414 		}
4415 
4416 		if (!folio) {
4417 			/*
4418 			 * Back out if somebody else faulted in this pte
4419 			 * while we released the pte lock.
4420 			 */
4421 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4422 					vmf->address, &vmf->ptl);
4423 			if (likely(vmf->pte &&
4424 				   pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4425 				ret = VM_FAULT_OOM;
4426 			goto unlock;
4427 		}
4428 
4429 		/* Had to read the page from swap area: Major fault */
4430 		ret = VM_FAULT_MAJOR;
4431 		count_vm_event(PGMAJFAULT);
4432 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4433 		page = folio_file_page(folio, swp_offset(entry));
4434 	} else if (PageHWPoison(page)) {
4435 		/*
4436 		 * hwpoisoned dirty swapcache pages are kept for killing
4437 		 * owner processes (which may be unknown at hwpoison time)
4438 		 */
4439 		ret = VM_FAULT_HWPOISON;
4440 		goto out_release;
4441 	}
4442 
4443 	ret |= folio_lock_or_retry(folio, vmf);
4444 	if (ret & VM_FAULT_RETRY)
4445 		goto out_release;
4446 
4447 	if (swapcache) {
4448 		/*
4449 		 * Make sure folio_free_swap() or swapoff did not release the
4450 		 * swapcache from under us.  The page pin, and pte_same test
4451 		 * below, are not enough to exclude that.  Even if it is still
4452 		 * swapcache, we need to check that the page's swap has not
4453 		 * changed.
4454 		 */
4455 		if (unlikely(!folio_test_swapcache(folio) ||
4456 			     page_swap_entry(page).val != entry.val))
4457 			goto out_page;
4458 
4459 		/*
4460 		 * KSM sometimes has to copy on read faults, for example, if
4461 		 * page->index of !PageKSM() pages would be nonlinear inside the
4462 		 * anon VMA -- PageKSM() is lost on actual swapout.
4463 		 */
4464 		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
4465 		if (unlikely(!folio)) {
4466 			ret = VM_FAULT_OOM;
4467 			folio = swapcache;
4468 			goto out_page;
4469 		} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
4470 			ret = VM_FAULT_HWPOISON;
4471 			folio = swapcache;
4472 			goto out_page;
4473 		}
4474 		if (folio != swapcache)
4475 			page = folio_page(folio, 0);
4476 
4477 		/*
4478 		 * If we want to map a page that's in the swapcache writable, we
4479 		 * have to detect via the refcount if we're really the exclusive
4480 		 * owner. Try removing the extra reference from the local LRU
4481 		 * caches if required.
4482 		 */
4483 		if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
4484 		    !folio_test_ksm(folio) && !folio_test_lru(folio))
4485 			lru_add_drain();
4486 	}
4487 
4488 	folio_throttle_swaprate(folio, GFP_KERNEL);
4489 
4490 	/*
4491 	 * Back out if somebody else already faulted in this pte.
4492 	 */
4493 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4494 			&vmf->ptl);
4495 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4496 		goto out_nomap;
4497 
4498 	if (unlikely(!folio_test_uptodate(folio))) {
4499 		ret = VM_FAULT_SIGBUS;
4500 		goto out_nomap;
4501 	}
4502 
4503 	/* Large folio allocated for SWP_SYNCHRONOUS_IO (swapcache skipped) */
4504 	if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
4505 		unsigned long nr = folio_nr_pages(folio);
4506 		unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
4507 		unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
4508 		pte_t *folio_ptep = vmf->pte - idx;
4509 		pte_t folio_pte = ptep_get(folio_ptep);
4510 
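		/*
		 * Bail out unless the whole aligned range is still mapped by
		 * this folio's contiguous swap entries.
		 */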
4511 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4512 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4513 			goto out_nomap;
4514 
4515 		page_idx = idx;
4516 		address = folio_start;
4517 		ptep = folio_ptep;
4518 		goto check_folio;
4519 	}
4520 
4521 	nr_pages = 1;
4522 	page_idx = 0;
4523 	address = vmf->address;
4524 	ptep = vmf->pte;
4525 	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
4526 		int nr = folio_nr_pages(folio);
4527 		unsigned long idx = folio_page_idx(folio, page);
4528 		unsigned long folio_start = address - idx * PAGE_SIZE;
4529 		unsigned long folio_end = folio_start + nr * PAGE_SIZE;
4530 		pte_t *folio_ptep;
4531 		pte_t folio_pte;
4532 
4533 		if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
4534 			goto check_folio;
4535 		if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
4536 			goto check_folio;
4537 
4538 		folio_ptep = vmf->pte - idx;
4539 		folio_pte = ptep_get(folio_ptep);
4540 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4541 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4542 			goto check_folio;
4543 
4544 		page_idx = idx;
4545 		address = folio_start;
4546 		ptep = folio_ptep;
4547 		nr_pages = nr;
4548 		entry = folio->swap;
4549 		page = &folio->page;
4550 	}
4551 
4552 check_folio:
4553 	/*
4554 	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4555 	 * must never point at an anonymous page in the swapcache that is
4556 	 * PG_anon_exclusive. Sanity check that this holds and especially, that
4557 	 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4558 	 * check after taking the PT lock and making sure that nobody
4559 	 * concurrently faulted in this page and set PG_anon_exclusive.
4560 	 */
4561 	BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
4562 	BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
4563 
4564 	/*
4565 	 * Check under PT lock (to protect against concurrent fork() sharing
4566 	 * the swap entry concurrently) for certainly exclusive pages.
4567 	 */
4568 	if (!folio_test_ksm(folio)) {
4569 		exclusive = pte_swp_exclusive(vmf->orig_pte);
4570 		if (folio != swapcache) {
4571 			/*
4572 			 * We have a fresh page that is not exposed to the
4573 			 * swapcache -> certainly exclusive.
4574 			 */
4575 			exclusive = true;
4576 		} else if (exclusive && folio_test_writeback(folio) &&
4577 			  data_race(si->flags & SWP_STABLE_WRITES)) {
4578 			/*
4579 			 * This is tricky: not all swap backends support
4580 			 * concurrent page modifications while under writeback.
4581 			 *
4582 			 * So if we stumble over such a page in the swapcache
4583 			 * we must not set the page exclusive, otherwise we can
4584 			 * map it writable without further checks and modify it
4585 			 * while still under writeback.
4586 			 *
4587 			 * For these problematic swap backends, simply drop the
4588 			 * exclusive marker: this is perfectly fine as we start
4589 			 * writeback only if we fully unmapped the page and
4590 			 * there are no unexpected references on the page after
4591 			 * unmapping succeeded. After fully unmapped, no
4592 			 * further GUP references (FOLL_GET and FOLL_PIN) can
4593 			 * appear, so dropping the exclusive marker and mapping
4594 			 * it only R/O is fine.
4595 			 */
4596 			exclusive = false;
4597 		}
4598 	}
4599 
4600 	/*
4601 	 * Some architectures may have to restore extra metadata to the page
4602 	 * when reading from swap. This metadata may be indexed by swap entry
4603 	 * so this must be called before swap_free().
4604 	 */
4605 	arch_swap_restore(folio_swap(entry, folio), folio);
4606 
4607 	/*
4608 	 * Remove the swap entry and conditionally try to free up the swapcache.
4609 	 * We're already holding a reference on the page but haven't mapped it
4610 	 * yet.
4611 	 */
4612 	swap_free_nr(entry, nr_pages);
4613 	if (should_try_to_free_swap(folio, vma, vmf->flags))
4614 		folio_free_swap(folio);
4615 
4616 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4617 	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
4618 	pte = mk_pte(page, vma->vm_page_prot);
4619 	if (pte_swp_soft_dirty(vmf->orig_pte))
4620 		pte = pte_mksoft_dirty(pte);
4621 	if (pte_swp_uffd_wp(vmf->orig_pte))
4622 		pte = pte_mkuffd_wp(pte);
4623 
4624 	/*
4625 	 * Same logic as in do_wp_page(); however, optimize for pages that are
4626 	 * certainly not shared either because we just allocated them without
4627 	 * exposing them to the swapcache or because the swap entry indicates
4628 	 * exclusivity.
4629 	 */
4630 	if (!folio_test_ksm(folio) &&
4631 	    (exclusive || folio_ref_count(folio) == 1)) {
4632 		if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
4633 		    !pte_needs_soft_dirty_wp(vma, pte)) {
4634 			pte = pte_mkwrite(pte, vma);
4635 			if (vmf->flags & FAULT_FLAG_WRITE) {
4636 				pte = pte_mkdirty(pte);
4637 				vmf->flags &= ~FAULT_FLAG_WRITE;
4638 			}
4639 		}
4640 		rmap_flags |= RMAP_EXCLUSIVE;
4641 	}
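	/* We already hold one folio reference; take one per additional PTE. */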
4642 	folio_ref_add(folio, nr_pages - 1);
4643 	flush_icache_pages(vma, page, nr_pages);
4644 	vmf->orig_pte = pte_advance_pfn(pte, page_idx);
4645 
4646 	/* ksm created a completely new copy */
4647 	if (unlikely(folio != swapcache && swapcache)) {
4648 		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
4649 		folio_add_lru_vma(folio, vma);
4650 	} else if (!folio_test_anon(folio)) {
4651 		/*
4652 		 * We currently only expect small !anon folios which are either
4653 		 * fully exclusive or fully shared, or newly allocated large
4654 		 * folios which are fully exclusive. If we ever get large
4655 		 * folios within swapcache here, we have to be careful.
4656 		 */
4657 		VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio));
4658 		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
4659 		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
4660 	} else {
4661 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
4662 					rmap_flags);
4663 	}
4664 
4665 	VM_BUG_ON(!folio_test_anon(folio) ||
4666 			(pte_write(pte) && !PageAnonExclusive(page)));
4667 	set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
4668 	arch_do_swap_page_nr(vma->vm_mm, vma, address,
4669 			pte, pte, nr_pages);
4670 
4671 	folio_unlock(folio);
4672 	if (folio != swapcache && swapcache) {
4673 		/*
4674 		 * Hold the lock to prevent the swap entry from being
4675 		 * reused until we take the PT lock for the pte_same()
4676 		 * check (to avoid false positives from pte_same). For
4677 		 * further safety, release the lock after the swap_free
4678 		 * so that the swap count won't change under a
4679 		 * parallel locked swapcache.
4680 		 */
4681 		folio_unlock(swapcache);
4682 		folio_put(swapcache);
4683 	}
4684 
4685 	if (vmf->flags & FAULT_FLAG_WRITE) {
4686 		ret |= do_wp_page(vmf);
4687 		if (ret & VM_FAULT_ERROR)
4688 			ret &= VM_FAULT_ERROR;
4689 		goto out;
4690 	}
4691 
4692 	/* No need to invalidate - it was non-present before */
4693 	update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
4694 unlock:
4695 	if (vmf->pte)
4696 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4697 out:
4698 	/* Clear the swap cache pin for direct swapin after PTL unlock */
4699 	if (need_clear_cache) {
4700 		swapcache_clear(si, entry, nr_pages);
4701 		if (waitqueue_active(&swapcache_wq))
4702 			wake_up(&swapcache_wq);
4703 	}
4704 	if (si)
4705 		put_swap_device(si);
4706 	return ret;
4707 out_nomap:
4708 	if (vmf->pte)
4709 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4710 out_page:
4711 	folio_unlock(folio);
4712 out_release:
4713 	folio_put(folio);
4714 	if (folio != swapcache && swapcache) {
4715 		folio_unlock(swapcache);
4716 		folio_put(swapcache);
4717 	}
4718 	if (need_clear_cache) {
4719 		swapcache_clear(si, entry, nr_pages);
4720 		if (waitqueue_active(&swapcache_wq))
4721 			wake_up(&swapcache_wq);
4722 	}
4723 	if (si)
4724 		put_swap_device(si);
4725 	return ret;
4726 }
4727 
4728 static bool pte_range_none(pte_t *pte, int nr_pages)
4729 {
4730 	int i;
4731 
4732 	for (i = 0; i < nr_pages; i++) {
4733 		if (!pte_none(ptep_get_lockless(pte + i)))
4734 			return false;
4735 	}
4736 
4737 	return true;
4738 }
4739 
4740 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
4741 {
4742 	struct vm_area_struct *vma = vmf->vma;
4743 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4744 	unsigned long orders;
4745 	struct folio *folio;
4746 	unsigned long addr;
4747 	pte_t *pte;
4748 	gfp_t gfp;
4749 	int order;
4750 
4751 	/*
4752 	 * If uffd is active for the vma we need per-page fault fidelity to
4753 	 * maintain the uffd semantics.
4754 	 */
4755 	if (unlikely(userfaultfd_armed(vma)))
4756 		goto fallback;
4757 
4758 	/*
4759 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4760 	 * for this vma. Then filter out the orders that can't be allocated over
4761 	 * the faulting address and still be fully contained in the vma.
4762 	 */
4763 	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4764 			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4765 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4766 
4767 	if (!orders)
4768 		goto fallback;
4769 
4770 	pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
4771 	if (!pte)
4772 		return ERR_PTR(-EAGAIN);
4773 
4774 	/*
4775 	 * Find the highest order where the aligned range is completely
4776 	 * pte_none(). Note that all remaining orders will be completely
4777 	 * pte_none().
4778 	 */
4779 	order = highest_order(orders);
4780 	while (orders) {
4781 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4782 		if (pte_range_none(pte + pte_index(addr), 1 << order))
4783 			break;
4784 		order = next_order(&orders, order);
4785 	}
4786 
4787 	pte_unmap(pte);
4788 
4789 	if (!orders)
4790 		goto fallback;
4791 
4792 	/* Try allocating the highest of the remaining orders. */
4793 	gfp = vma_thp_gfp_mask(vma);
4794 	while (orders) {
4795 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4796 		folio = vma_alloc_folio(gfp, order, vma, addr);
4797 		if (folio) {
4798 			if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
4799 				count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
4800 				folio_put(folio);
4801 				goto next;
4802 			}
4803 			folio_throttle_swaprate(folio, gfp);
4804 			/*
4805 			 * When a folio is not zeroed during allocation
4806 			 * (__GFP_ZERO not used) or user folios require special
4807 			 * handling, folio_zero_user() is used to make sure
4808 			 * that the page corresponding to the faulting address
4809 			 * will be hot in the cache after zeroing.
4810 			 */
4811 			if (user_alloc_needs_zeroing())
4812 				folio_zero_user(folio, vmf->address);
4813 			return folio;
4814 		}
4815 next:
4816 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
4817 		order = next_order(&orders, order);
4818 	}
4819 
4820 fallback:
4821 #endif
4822 	return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
4823 }
4824 
4825 /*
4826  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4827  * but allow concurrent faults), and pte mapped but not yet locked.
4828  * We return with mmap_lock still held, but pte unmapped and unlocked.
4829  */
4830 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4831 {
4832 	struct vm_area_struct *vma = vmf->vma;
4833 	unsigned long addr = vmf->address;
4834 	struct folio *folio;
4835 	vm_fault_t ret = 0;
4836 	int nr_pages = 1;
4837 	pte_t entry;
4838 
4839 	/* File mapping without ->vm_ops ? */
4840 	if (vma->vm_flags & VM_SHARED)
4841 		return VM_FAULT_SIGBUS;
4842 
4843 	/*
4844 	 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4845 	 * be distinguished from a transient failure of pte_offset_map().
4846 	 */
4847 	if (pte_alloc(vma->vm_mm, vmf->pmd))
4848 		return VM_FAULT_OOM;
4849 
4850 	/* Use the zero-page for reads */
4851 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4852 			!mm_forbids_zeropage(vma->vm_mm)) {
4853 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4854 						vma->vm_page_prot));
4855 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4856 				vmf->address, &vmf->ptl);
4857 		if (!vmf->pte)
4858 			goto unlock;
4859 		if (vmf_pte_changed(vmf)) {
4860 			update_mmu_tlb(vma, vmf->address, vmf->pte);
4861 			goto unlock;
4862 		}
4863 		ret = check_stable_address_space(vma->vm_mm);
4864 		if (ret)
4865 			goto unlock;
4866 		/* Deliver the page fault to userland, check inside PT lock */
4867 		if (userfaultfd_missing(vma)) {
4868 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4869 			return handle_userfault(vmf, VM_UFFD_MISSING);
4870 		}
4871 		goto setpte;
4872 	}
4873 
4874 	/* Allocate our own private page. */
4875 	ret = vmf_anon_prepare(vmf);
4876 	if (ret)
4877 		return ret;
4878 	/* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
4879 	folio = alloc_anon_folio(vmf);
4880 	if (IS_ERR(folio))
4881 		return 0;
4882 	if (!folio)
4883 		goto oom;
4884 
4885 	nr_pages = folio_nr_pages(folio);
4886 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
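	/*
	 * alloc_anon_folio() only returns a large folio if the naturally
	 * aligned range around the fault address fits within the VMA, so
	 * the folio is mapped starting at that aligned address.
	 */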
4887 
4888 	/*
4889 	 * The memory barrier inside __folio_mark_uptodate makes sure that
4890 	 * preceding stores to the page contents become visible before
4891 	 * the set_pte_at() write.
4892 	 */
4893 	__folio_mark_uptodate(folio);
4894 
4895 	entry = mk_pte(&folio->page, vma->vm_page_prot);
4896 	entry = pte_sw_mkyoung(entry);
4897 	if (vma->vm_flags & VM_WRITE)
4898 		entry = pte_mkwrite(pte_mkdirty(entry), vma);
4899 
4900 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
4901 	if (!vmf->pte)
4902 		goto release;
4903 	if (nr_pages == 1 && vmf_pte_changed(vmf)) {
4904 		update_mmu_tlb(vma, addr, vmf->pte);
4905 		goto release;
4906 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
4907 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
4908 		goto release;
4909 	}
4910 
4911 	ret = check_stable_address_space(vma->vm_mm);
4912 	if (ret)
4913 		goto release;
4914 
4915 	/* Deliver the page fault to userland, check inside PT lock */
4916 	if (userfaultfd_missing(vma)) {
4917 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4918 		folio_put(folio);
4919 		return handle_userfault(vmf, VM_UFFD_MISSING);
4920 	}
4921 
4922 	folio_ref_add(folio, nr_pages - 1);
4923 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4924 	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
4925 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
4926 	folio_add_lru_vma(folio, vma);
4927 setpte:
4928 	if (vmf_orig_pte_uffd_wp(vmf))
4929 		entry = pte_mkuffd_wp(entry);
4930 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
4931 
4932 	/* No need to invalidate - it was non-present before */
4933 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
4934 unlock:
4935 	if (vmf->pte)
4936 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4937 	return ret;
4938 release:
4939 	folio_put(folio);
4940 	goto unlock;
4941 oom:
4942 	return VM_FAULT_OOM;
4943 }
4944 
4945 /*
4946  * The mmap_lock must have been held on entry, and may have been
4947  * released depending on flags and vma->vm_ops->fault() return value.
4948  * See filemap_fault() and __lock_page_retry().
4949  */
4950 static vm_fault_t __do_fault(struct vm_fault *vmf)
4951 {
4952 	struct vm_area_struct *vma = vmf->vma;
4953 	struct folio *folio;
4954 	vm_fault_t ret;
4955 
4956 	/*
4957 	 * Preallocate pte before we take page_lock because this might lead to
4958 	 * deadlocks for memcg reclaim which waits for pages under writeback:
4959 	 *				lock_page(A)
4960 	 *				SetPageWriteback(A)
4961 	 *				unlock_page(A)
4962 	 * lock_page(B)
4963 	 *				lock_page(B)
4964 	 * pte_alloc_one
4965 	 *   shrink_folio_list
4966 	 *     wait_on_page_writeback(A)
4967 	 *				SetPageWriteback(B)
4968 	 *				unlock_page(B)
4969 	 *				# flush A, B to clear the writeback
4970 	 */
4971 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4972 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4973 		if (!vmf->prealloc_pte)
4974 			return VM_FAULT_OOM;
4975 	}
4976 
4977 	ret = vma->vm_ops->fault(vmf);
4978 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4979 			    VM_FAULT_DONE_COW)))
4980 		return ret;
4981 
4982 	folio = page_folio(vmf->page);
4983 	if (unlikely(PageHWPoison(vmf->page))) {
4984 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4985 		if (ret & VM_FAULT_LOCKED) {
4986 			if (page_mapped(vmf->page))
4987 				unmap_mapping_folio(folio);
4988 			/* Retry if a clean folio was removed from the cache. */
4989 			if (mapping_evict_folio(folio->mapping, folio))
4990 				poisonret = VM_FAULT_NOPAGE;
4991 			folio_unlock(folio);
4992 		}
4993 		folio_put(folio);
4994 		vmf->page = NULL;
4995 		return poisonret;
4996 	}
4997 
4998 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
4999 		folio_lock(folio);
5000 	else
5001 		VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
5002 
5003 	return ret;
5004 }
5005 
5006 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5007 static void deposit_prealloc_pte(struct vm_fault *vmf)
5008 {
5009 	struct vm_area_struct *vma = vmf->vma;
5010 
5011 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
5012 	/*
5013 	 * We are going to consume the prealloc table,
5014 	 * count that as nr_ptes.
5015 	 */
5016 	mm_inc_nr_ptes(vma->vm_mm);
5017 	vmf->prealloc_pte = NULL;
5018 }
5019 
5020 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5021 {
5022 	struct folio *folio = page_folio(page);
5023 	struct vm_area_struct *vma = vmf->vma;
5024 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5025 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5026 	pmd_t entry;
5027 	vm_fault_t ret = VM_FAULT_FALLBACK;
5028 
5029 	/*
5030 	 * It is too late to allocate a small folio, we already have a large
5031 	 * folio in the pagecache: especially s390 KVM cannot tolerate any
5032 	 * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any
5033 	 * PMD mappings if THPs are disabled.
5034 	 */
5035 	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags))
5036 		return ret;
5037 
5038 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
5039 		return ret;
5040 
5041 	if (folio_order(folio) != HPAGE_PMD_ORDER)
5042 		return ret;
5043 	page = &folio->page;
5044 
5045 	/*
5046 	 * Just back off if any subpage of a THP is corrupted; otherwise
5047 	 * the corrupted page may be mapped by PMD silently to escape the
5048 	 * check.  This kind of THP can only be PTE mapped.  Access to
5049 	 * the corrupted subpage should trigger SIGBUS as expected.
5050 	 */
5051 	if (unlikely(folio_test_has_hwpoisoned(folio)))
5052 		return ret;
5053 
5054 	/*
5055 	 * Archs like ppc64 need additional space to store information
5056 	 * related to the pte entry. Use the preallocated table for that.
5057 	 */
5058 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
5059 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5060 		if (!vmf->prealloc_pte)
5061 			return VM_FAULT_OOM;
5062 	}
5063 
5064 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
5065 	if (unlikely(!pmd_none(*vmf->pmd)))
5066 		goto out;
5067 
5068 	flush_icache_pages(vma, page, HPAGE_PMD_NR);
5069 
5070 	entry = mk_huge_pmd(page, vma->vm_page_prot);
5071 	if (write)
5072 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
5073 
5074 	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
5075 	folio_add_file_rmap_pmd(folio, page, vma);
5076 
5077 	/*
5078 	 * deposit and withdraw with pmd lock held
5079 	 */
5080 	if (arch_needs_pgtable_deposit())
5081 		deposit_prealloc_pte(vmf);
5082 
5083 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
5084 
5085 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
5086 
5087 	/* fault is handled */
5088 	ret = 0;
5089 	count_vm_event(THP_FILE_MAPPED);
5090 out:
5091 	spin_unlock(vmf->ptl);
5092 	return ret;
5093 }
5094 #else
5095 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5096 {
5097 	return VM_FAULT_FALLBACK;
5098 }
5099 #endif
5100 
5101 /**
5102  * set_pte_range - Set a range of PTEs to point to pages in a folio.
5103  * @vmf: Fault description.
5104  * @folio: The folio that contains @page.
5105  * @page: The first page to create a PTE for.
5106  * @nr: The number of PTEs to create.
5107  * @addr: The first address to create a PTE for.
5108  */
5109 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
5110 		struct page *page, unsigned int nr, unsigned long addr)
5111 {
5112 	struct vm_area_struct *vma = vmf->vma;
5113 	bool write = vmf->flags & FAULT_FLAG_WRITE;
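	/* prefault: the range being mapped does not include the faulting address */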
5114 	bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
5115 	pte_t entry;
5116 
5117 	flush_icache_pages(vma, page, nr);
5118 	entry = mk_pte(page, vma->vm_page_prot);
5119 
5120 	if (prefault && arch_wants_old_prefaulted_pte())
5121 		entry = pte_mkold(entry);
5122 	else
5123 		entry = pte_sw_mkyoung(entry);
5124 
5125 	if (write)
5126 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5127 	if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
5128 		entry = pte_mkuffd_wp(entry);
5129 	/* copy-on-write page */
5130 	if (write && !(vma->vm_flags & VM_SHARED)) {
5131 		VM_BUG_ON_FOLIO(nr != 1, folio);
5132 		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5133 		folio_add_lru_vma(folio, vma);
5134 	} else {
5135 		folio_add_file_rmap_ptes(folio, page, nr, vma);
5136 	}
5137 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
5138 
5139 	/* no need to invalidate: a not-present page won't be cached */
5140 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
5141 }
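
/*
 * A worked example of the "prefault" test above, assuming 4 KiB pages and
 * hypothetical addresses: with vmf->address == 0x403000, addr == 0x400000
 * and nr == 4, the mapped range is [0x400000, 0x404000), in_range() is
 * true and the faulting page itself is among the PTEs being set, so the
 * entry is made young.  If instead only neighbouring pages are mapped for
 * the same fault (say addr == 0x406000, nr == 2), in_range() is false,
 * the access is a prefault, and arch_wants_old_prefaulted_pte() decides
 * whether the speculative entries are mapped old so that pages which are
 * never actually touched age out cheaply.
 */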
5142 
5143 static bool vmf_pte_changed(struct vm_fault *vmf)
5144 {
5145 	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
5146 		return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
5147 
5148 	return !pte_none(ptep_get(vmf->pte));
5149 }
5150 
5151 /**
5152  * finish_fault - finish page fault once we have prepared the page to fault
5153  *
5154  * @vmf: structure describing the fault
5155  *
5156  * This function handles all that is needed to finish a page fault once the
5157  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5158  * given page, adds reverse page mapping, handles memcg charges and LRU
5159  * addition.
5160  *
5161  * The function expects the page to be locked and on success it consumes a
5162  * reference of a page being mapped (for the PTE which maps it).
5163  *
5164  * Return: %0 on success, %VM_FAULT_ code in case of error.
5165  */
5166 vm_fault_t finish_fault(struct vm_fault *vmf)
5167 {
5168 	struct vm_area_struct *vma = vmf->vma;
5169 	struct page *page;
5170 	struct folio *folio;
5171 	vm_fault_t ret;
5172 	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
5173 		      !(vma->vm_flags & VM_SHARED);
5174 	int type, nr_pages;
5175 	unsigned long addr = vmf->address;
5176 
5177 	/* Did we COW the page? */
5178 	if (is_cow)
5179 		page = vmf->cow_page;
5180 	else
5181 		page = vmf->page;
5182 
5183 	/*
5184 	 * Check even for read faults because we might have lost our CoWed
5185 	 * page.
5186 	 */
5187 	if (!(vma->vm_flags & VM_SHARED)) {
5188 		ret = check_stable_address_space(vma->vm_mm);
5189 		if (ret)
5190 			return ret;
5191 	}
5192 
5193 	if (pmd_none(*vmf->pmd)) {
5194 		if (PageTransCompound(page)) {
5195 			ret = do_set_pmd(vmf, page);
5196 			if (ret != VM_FAULT_FALLBACK)
5197 				return ret;
5198 		}
5199 
5200 		if (vmf->prealloc_pte)
5201 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
5202 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
5203 			return VM_FAULT_OOM;
5204 	}
5205 
5206 	folio = page_folio(page);
5207 	nr_pages = folio_nr_pages(folio);
5208 
5209 	/*
5210 	 * Use per-page faults to maintain the uffd semantics; the same
5211 	 * approach also applies to non-anonymous-shmem faults to avoid
5212 	 * inflating the RSS of the process.
5213 	 */
5214 	if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
5215 		nr_pages = 1;
5216 	} else if (nr_pages > 1) {
5217 		pgoff_t idx = folio_page_idx(folio, page);
5218 		/* The page offset of vmf->address within the VMA. */
5219 		pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5220 		/* The index of the entry in the pagetable for fault page. */
5221 		pgoff_t pte_off = pte_index(vmf->address);
5222 
5223 		/*
5224 		 * Fall back to a per-page fault if the folio in the page
5225 		 * cache extends beyond the VMA limits or the PMD pagetable limits.
5226 		 */
5227 		if (unlikely(vma_off < idx ||
5228 			    vma_off + (nr_pages - idx) > vma_pages(vma) ||
5229 			    pte_off < idx ||
5230 			    pte_off + (nr_pages - idx) > PTRS_PER_PTE)) {
5231 			nr_pages = 1;
5232 		} else {
5233 			/* Now we can set mappings for the whole large folio. */
5234 			addr = vmf->address - idx * PAGE_SIZE;
5235 			page = &folio->page;
5236 		}
5237 	}
5238 
5239 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5240 				       addr, &vmf->ptl);
5241 	if (!vmf->pte)
5242 		return VM_FAULT_NOPAGE;
5243 
5244 	/* Re-check under ptl */
5245 	if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
5246 		update_mmu_tlb(vma, addr, vmf->pte);
5247 		ret = VM_FAULT_NOPAGE;
5248 		goto unlock;
5249 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5250 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
5251 		ret = VM_FAULT_NOPAGE;
5252 		goto unlock;
5253 	}
5254 
5255 	folio_ref_add(folio, nr_pages - 1);
5256 	set_pte_range(vmf, folio, page, nr_pages, addr);
5257 	type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
5258 	add_mm_counter(vma->vm_mm, type, nr_pages);
5259 	ret = 0;
5260 
5261 unlock:
5262 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5263 	return ret;
5264 }
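
/*
 * Illustrative sketch, not part of this file: the minimal contract a
 * vm_ops->fault handler must honour for finish_fault() above.  It must
 * return a locked page in vmf->page and transfer one page reference,
 * which finish_fault() consumes when it maps the page.  The handler name
 * and the freshly-allocated backing are hypothetical; real handlers
 * return a page cache page, e.g. via filemap_fault().
 */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE);

	if (!page)
		return VM_FAULT_OOM;
	clear_highpage(page);	/* supply zeroed contents */
	lock_page(page);	/* finish_fault() expects the page locked */
	vmf->page = page;	/* reference handed over to the core */
	return VM_FAULT_LOCKED;
}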
5265 
5266 static unsigned long fault_around_pages __read_mostly =
5267 	65536 >> PAGE_SHIFT;
5268 
5269 #ifdef CONFIG_DEBUG_FS
5270 static int fault_around_bytes_get(void *data, u64 *val)
5271 {
5272 	*val = fault_around_pages << PAGE_SHIFT;
5273 	return 0;
5274 }
5275 
5276 /*
5277  * fault_around_bytes must be rounded down to the nearest power-of-two number
5278  * of pages, as that is what do_fault_around() expects to see.
5279  */
5280 static int fault_around_bytes_set(void *data, u64 val)
5281 {
5282 	if (val / PAGE_SIZE > PTRS_PER_PTE)
5283 		return -EINVAL;
5284 
5285 	/*
5286 	 * The minimum value is 1 page, however this results in no fault-around
5287 	 * at all. See should_fault_around().
5288 	 */
5289 	val = max(val, PAGE_SIZE);
5290 	fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
5291 
5292 	return 0;
5293 }
5294 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5295 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5296 
5297 static int __init fault_around_debugfs(void)
5298 {
5299 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
5300 				   &fault_around_bytes_fops);
5301 	return 0;
5302 }
5303 late_initcall(fault_around_debugfs);
5304 #endif
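
/*
 * A worked example of the rounding in fault_around_bytes_set(), assuming
 * 4 KiB pages (PAGE_SIZE == 4096, PAGE_SHIFT == 12) and a write of 12288
 * to the fault_around_bytes debugfs file:
 *
 *	val = max(12288, PAGE_SIZE)		-> 12288
 *	rounddown_pow_of_two(12288)		-> 8192
 *	fault_around_pages = 8192 >> 12		-> 2
 *
 * so subsequent faults try to map up to 2 pages at a time.  A write of
 * 4096 yields fault_around_pages == 1, which should_fault_around() below
 * treats as no fault-around at all, and anything larger than
 * PTRS_PER_PTE * PAGE_SIZE is rejected with -EINVAL.
 */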
5305 
5306 /*
5307  * do_fault_around() tries to map a few pages around the fault address. The hope
5308  * is that the pages will be needed soon and this will lower the number of
5309  * faults to handle.
5310  *
5311  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5312  * not ready to be mapped: not up-to-date, locked, etc.
5313  *
5314  * This function doesn't cross VMA or page table boundaries, in order to call
5315  * map_pages() and acquire a PTE lock only once.
5316  *
5317  * fault_around_pages defines how many pages we'll try to map.
5318  * do_fault_around() expects it to be set to a power of two less than or equal
5319  * to PTRS_PER_PTE.
5320  *
5321  * The virtual address of the area that we map is naturally aligned to
5322  * fault_around_pages * PAGE_SIZE rounded down to the machine page size
5323  * (and therefore to page order).  This way it's easier to guarantee
5324  * that we don't cross page table boundaries.
5325  */
5326 static vm_fault_t do_fault_around(struct vm_fault *vmf)
5327 {
5328 	pgoff_t nr_pages = READ_ONCE(fault_around_pages);
5329 	pgoff_t pte_off = pte_index(vmf->address);
5330 	/* The page offset of vmf->address within the VMA. */
5331 	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5332 	pgoff_t from_pte, to_pte;
5333 	vm_fault_t ret;
5334 
5335 	/* The PTE offset of the start address, clamped to the VMA. */
5336 	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
5337 		       pte_off - min(pte_off, vma_off));
5338 
5339 	/* The PTE offset of the end address, clamped to the VMA and PTE. */
5340 	to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
5341 		      pte_off + vma_pages(vmf->vma) - vma_off) - 1;
5342 
5343 	if (pmd_none(*vmf->pmd)) {
5344 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
5345 		if (!vmf->prealloc_pte)
5346 			return VM_FAULT_OOM;
5347 	}
5348 
5349 	rcu_read_lock();
5350 	ret = vmf->vma->vm_ops->map_pages(vmf,
5351 			vmf->pgoff + from_pte - pte_off,
5352 			vmf->pgoff + to_pte - pte_off);
5353 	rcu_read_unlock();
5354 
5355 	return ret;
5356 }
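
/*
 * A worked example of the clamping above, with hypothetical values:
 * PTRS_PER_PTE == 512, fault_around_pages == 16, and a fault at
 * pte_off == 100 that is page 3 of an 8-page VMA (vma_off == 3,
 * vma_pages() == 8):
 *
 *	from_pte = max(ALIGN_DOWN(100, 16), 100 - min(100, 3))
 *		 = max(96, 97) = 97
 *	to_pte   = min3(97 + 16, 512, 100 + 8 - 3) - 1
 *		 = min3(113, 512, 105) - 1 = 104
 *
 * so map_pages() is asked for PTEs 97..104, exactly the eight pages of
 * the VMA, without crossing the VMA or the page table.
 */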
5357 
5358 /* Return true if we should do read fault-around, false otherwise */
5359 static inline bool should_fault_around(struct vm_fault *vmf)
5360 {
5361 	/* No ->map_pages?  No way to fault around... */
5362 	if (!vmf->vma->vm_ops->map_pages)
5363 		return false;
5364 
5365 	if (uffd_disable_fault_around(vmf->vma))
5366 		return false;
5367 
5368 	/* A single page implies no faulting 'around' at all. */
5369 	return fault_around_pages > 1;
5370 }
5371 
5372 static vm_fault_t do_read_fault(struct vm_fault *vmf)
5373 {
5374 	vm_fault_t ret = 0;
5375 	struct folio *folio;
5376 
5377 	/*
5378 	 * Let's call ->map_pages() first and use ->fault() as fallback
5379 	 * if the page at that offset is not ready to be mapped (cold cache or
5380 	 * something).
5381 	 */
5382 	if (should_fault_around(vmf)) {
5383 		ret = do_fault_around(vmf);
5384 		if (ret)
5385 			return ret;
5386 	}
5387 
5388 	ret = vmf_can_call_fault(vmf);
5389 	if (ret)
5390 		return ret;
5391 
5392 	ret = __do_fault(vmf);
5393 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5394 		return ret;
5395 
5396 	ret |= finish_fault(vmf);
5397 	folio = page_folio(vmf->page);
5398 	folio_unlock(folio);
5399 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5400 		folio_put(folio);
5401 	return ret;
5402 }
5403 
5404 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
5405 {
5406 	struct vm_area_struct *vma = vmf->vma;
5407 	struct folio *folio;
5408 	vm_fault_t ret;
5409 
5410 	ret = vmf_can_call_fault(vmf);
5411 	if (!ret)
5412 		ret = vmf_anon_prepare(vmf);
5413 	if (ret)
5414 		return ret;
5415 
5416 	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
5417 	if (!folio)
5418 		return VM_FAULT_OOM;
5419 
5420 	vmf->cow_page = &folio->page;
5421 
5422 	ret = __do_fault(vmf);
5423 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5424 		goto uncharge_out;
5425 	if (ret & VM_FAULT_DONE_COW)
5426 		return ret;
5427 
5428 	if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
5429 		ret = VM_FAULT_HWPOISON;
5430 		goto unlock;
5431 	}
5432 	__folio_mark_uptodate(folio);
5433 
5434 	ret |= finish_fault(vmf);
5435 unlock:
5436 	unlock_page(vmf->page);
5437 	put_page(vmf->page);
5438 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5439 		goto uncharge_out;
5440 	return ret;
5441 uncharge_out:
5442 	folio_put(folio);
5443 	return ret;
5444 }
5445 
5446 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5447 {
5448 	struct vm_area_struct *vma = vmf->vma;
5449 	vm_fault_t ret, tmp;
5450 	struct folio *folio;
5451 
5452 	ret = vmf_can_call_fault(vmf);
5453 	if (ret)
5454 		return ret;
5455 
5456 	ret = __do_fault(vmf);
5457 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5458 		return ret;
5459 
5460 	folio = page_folio(vmf->page);
5461 
5462 	/*
5463 	 * Check if the backing address space wants to know that the page is
5464 	 * about to become writable
5465 	 */
5466 	if (vma->vm_ops->page_mkwrite) {
5467 		folio_unlock(folio);
5468 		tmp = do_page_mkwrite(vmf, folio);
5469 		if (unlikely(!tmp ||
5470 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5471 			folio_put(folio);
5472 			return tmp;
5473 		}
5474 	}
5475 
5476 	ret |= finish_fault(vmf);
5477 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5478 					VM_FAULT_RETRY))) {
5479 		folio_unlock(folio);
5480 		folio_put(folio);
5481 		return ret;
5482 	}
5483 
5484 	ret |= fault_dirty_shared_page(vmf);
5485 	return ret;
5486 }
5487 
5488 /*
5489  * We enter with non-exclusive mmap_lock (to exclude vma changes,
5490  * but allow concurrent faults).
5491  * The mmap_lock may have been released depending on flags and our
5492  * return value.  See filemap_fault() and __folio_lock_or_retry().
5493  * If mmap_lock is released, vma may become invalid (for example
5494  * by other thread calling munmap()).
5495  */
5496 static vm_fault_t do_fault(struct vm_fault *vmf)
5497 {
5498 	struct vm_area_struct *vma = vmf->vma;
5499 	struct mm_struct *vm_mm = vma->vm_mm;
5500 	vm_fault_t ret;
5501 
5502 	/*
5503 	 * The VMA was not fully populated on mmap() or is missing VM_DONTEXPAND.
5504 	 */
5505 	if (!vma->vm_ops->fault) {
5506 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
5507 					       vmf->address, &vmf->ptl);
5508 		if (unlikely(!vmf->pte))
5509 			ret = VM_FAULT_SIGBUS;
5510 		else {
5511 			/*
5512 			 * Make sure this is not a temporary clearing of the pte
5513 			 * by holding the ptl and checking again. An R/M/W update
5514 			 * of the pte involves taking the ptl, clearing the pte so
5515 			 * that we don't race with concurrent hardware modification,
5516 			 * and then updating it.
5517 			 */
5518 			if (unlikely(pte_none(ptep_get(vmf->pte))))
5519 				ret = VM_FAULT_SIGBUS;
5520 			else
5521 				ret = VM_FAULT_NOPAGE;
5522 
5523 			pte_unmap_unlock(vmf->pte, vmf->ptl);
5524 		}
5525 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
5526 		ret = do_read_fault(vmf);
5527 	else if (!(vma->vm_flags & VM_SHARED))
5528 		ret = do_cow_fault(vmf);
5529 	else
5530 		ret = do_shared_fault(vmf);
5531 
5532 	/* preallocated pagetable is unused: free it */
5533 	if (vmf->prealloc_pte) {
5534 		pte_free(vm_mm, vmf->prealloc_pte);
5535 		vmf->prealloc_pte = NULL;
5536 	}
5537 	return ret;
5538 }
5539 
5540 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
5541 		      unsigned long addr, int *flags,
5542 		      bool writable, int *last_cpupid)
5543 {
5544 	struct vm_area_struct *vma = vmf->vma;
5545 
5546 	/*
5547 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5548 	 * much anyway since they can be in shared cache state. This misses
5549 	 * the case where a mapping is writable but the process never writes
5550 	 * to it but pte_write gets cleared during protection updates and
5551 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
5552 	 * background writeback, dirty balancing and application behaviour.
5553 	 */
5554 	if (!writable)
5555 		*flags |= TNF_NO_GROUP;
5556 
5557 	/*
5558 	 * Flag if the folio is shared between multiple address spaces. This
5559 	 * is later used when determining whether to group tasks together.
5560 	 */
5561 	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
5562 		*flags |= TNF_SHARED;
5563 	/*
5564 	 * In memory tiering mode, the cpupid of a slow memory page is used
5565 	 * to record the page access time, so use the default value.
5566 	 */
5567 	if (folio_use_access_time(folio))
5568 		*last_cpupid = (-1 & LAST_CPUPID_MASK);
5569 	else
5570 		*last_cpupid = folio_last_cpupid(folio);
5571 
5572 	/* Record the current PID accessing the VMA */
5573 	vma_set_access_pid_bit(vma);
5574 
5575 	count_vm_numa_event(NUMA_HINT_FAULTS);
5576 #ifdef CONFIG_NUMA_BALANCING
5577 	count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
5578 #endif
5579 	if (folio_nid(folio) == numa_node_id()) {
5580 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5581 		*flags |= TNF_FAULT_LOCAL;
5582 	}
5583 
5584 	return mpol_misplaced(folio, vmf, addr);
5585 }
5586 
5587 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5588 					unsigned long fault_addr, pte_t *fault_pte,
5589 					bool writable)
5590 {
5591 	pte_t pte, old_pte;
5592 
5593 	old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
5594 	pte = pte_modify(old_pte, vma->vm_page_prot);
5595 	pte = pte_mkyoung(pte);
5596 	if (writable)
5597 		pte = pte_mkwrite(pte, vma);
5598 	ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
5599 	update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
5600 }
5601 
5602 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5603 				       struct folio *folio, pte_t fault_pte,
5604 				       bool ignore_writable, bool pte_write_upgrade)
5605 {
5606 	int nr = pte_pfn(fault_pte) - folio_pfn(folio);
5607 	unsigned long start, end, addr = vmf->address;
5608 	unsigned long addr_start = addr - (nr << PAGE_SHIFT);
5609 	unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
5610 	pte_t *start_ptep;
5611 
5612 	/* Stay within the VMA and within the page table. */
5613 	start = max3(addr_start, pt_start, vma->vm_start);
5614 	end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
5615 		   vma->vm_end);
5616 	start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
5617 
5618 	/* Restore all PTEs' mapping of the large folio */
5619 	for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
5620 		pte_t ptent = ptep_get(start_ptep);
5621 		bool writable = false;
5622 
5623 		if (!pte_present(ptent) || !pte_protnone(ptent))
5624 			continue;
5625 
5626 		if (pfn_folio(pte_pfn(ptent)) != folio)
5627 			continue;
5628 
5629 		if (!ignore_writable) {
5630 			ptent = pte_modify(ptent, vma->vm_page_prot);
5631 			writable = pte_write(ptent);
5632 			if (!writable && pte_write_upgrade &&
5633 			    can_change_pte_writable(vma, addr, ptent))
5634 				writable = true;
5635 		}
5636 
5637 		numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
5638 	}
5639 }
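
/*
 * A worked example of the clamping above, with hypothetical values:
 * 4 KiB pages, a 16-page (64 KiB) folio whose first page is mapped at
 * 0x400000, a fault on folio page 5 (addr == 0x405000, so nr == 5), and
 * a VMA covering [0x3fe000, 0x40c000):
 *
 *	addr_start = 0x405000 - 5 * 0x1000        = 0x400000
 *	pt_start   = ALIGN_DOWN(0x405000, 2 MiB)  = 0x400000
 *	start      = max3(0x400000, 0x400000, 0x3fe000) = 0x400000
 *	end        = min3(0x410000, 0x600000, 0x40c000) = 0x40c000
 *
 * so only the first 12 pages of the folio get their mappings rebuilt;
 * the last 4 lie beyond vm_end and are left alone.
 */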
5640 
5641 static vm_fault_t do_numa_page(struct vm_fault *vmf)
5642 {
5643 	struct vm_area_struct *vma = vmf->vma;
5644 	struct folio *folio = NULL;
5645 	int nid = NUMA_NO_NODE;
5646 	bool writable = false, ignore_writable = false;
5647 	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
5648 	int last_cpupid;
5649 	int target_nid;
5650 	pte_t pte, old_pte;
5651 	int flags = 0, nr_pages;
5652 
5653 	/*
5654 	 * The pte cannot be used safely until we verify, while holding the page
5655 	 * table lock, that its contents have not changed during fault handling.
5656 	 */
5657 	spin_lock(vmf->ptl);
5658 	/* Read the live PTE from the page tables: */
5659 	old_pte = ptep_get(vmf->pte);
5660 
5661 	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
5662 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5663 		return 0;
5664 	}
5665 
5666 	pte = pte_modify(old_pte, vma->vm_page_prot);
5667 
5668 	/*
5669 	 * Detect now whether the PTE could be writable; this information
5670 	 * is only valid while holding the PT lock.
5671 	 */
5672 	writable = pte_write(pte);
5673 	if (!writable && pte_write_upgrade &&
5674 	    can_change_pte_writable(vma, vmf->address, pte))
5675 		writable = true;
5676 
5677 	folio = vm_normal_folio(vma, vmf->address, pte);
5678 	if (!folio || folio_is_zone_device(folio))
5679 		goto out_map;
5680 
5681 	nid = folio_nid(folio);
5682 	nr_pages = folio_nr_pages(folio);
5683 
5684 	target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
5685 					writable, &last_cpupid);
5686 	if (target_nid == NUMA_NO_NODE)
5687 		goto out_map;
5688 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
5689 		flags |= TNF_MIGRATE_FAIL;
5690 		goto out_map;
5691 	}
5692 	/* The folio is isolated and isolation code holds a folio reference. */
5693 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5694 	writable = false;
5695 	ignore_writable = true;
5696 
5697 	/* Migrate to the requested node */
5698 	if (!migrate_misplaced_folio(folio, target_nid)) {
5699 		nid = target_nid;
5700 		flags |= TNF_MIGRATED;
5701 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
5702 		return 0;
5703 	}
5704 
5705 	flags |= TNF_MIGRATE_FAIL;
5706 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5707 				       vmf->address, &vmf->ptl);
5708 	if (unlikely(!vmf->pte))
5709 		return 0;
5710 	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
5711 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5712 		return 0;
5713 	}
5714 out_map:
5715 	/*
5716 	 * Make it present again. Depending on how the arch implements
5717 	 * non-accessible ptes, some may still allow access from kernel mode.
5718 	 */
5719 	if (folio && folio_test_large(folio))
5720 		numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
5721 					   pte_write_upgrade);
5722 	else
5723 		numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
5724 					    writable);
5725 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5726 
5727 	if (nid != NUMA_NO_NODE)
5728 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
5729 	return 0;
5730 }
5731 
5732 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
5733 {
5734 	struct vm_area_struct *vma = vmf->vma;
5735 
5736 	if (vma_is_anonymous(vma))
5737 		return do_huge_pmd_anonymous_page(vmf);
5738 	/*
5739 	 * Currently we just emit PAGE_SIZE for our fault events, so don't allow
5740 	 * a huge fault if we have a pre-content watch on this file.  This would
5741 	 * be trivial to support, but there would need to be tests to ensure
5742 	 * this works properly and those don't exist currently.
5743 	 */
5744 	if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5745 		return VM_FAULT_FALLBACK;
5746 	if (vma->vm_ops->huge_fault)
5747 		return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5748 	return VM_FAULT_FALLBACK;
5749 }
5750 
5751 /* `inline' is required to avoid gcc 4.1.2 build error */
5752 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
5753 {
5754 	struct vm_area_struct *vma = vmf->vma;
5755 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5756 	vm_fault_t ret;
5757 
5758 	if (vma_is_anonymous(vma)) {
5759 		if (likely(!unshare) &&
5760 		    userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
5761 			if (userfaultfd_wp_async(vmf->vma))
5762 				goto split;
5763 			return handle_userfault(vmf, VM_UFFD_WP);
5764 		}
5765 		return do_huge_pmd_wp_page(vmf);
5766 	}
5767 
5768 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5769 		/* See comment in create_huge_pmd. */
5770 		if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5771 			goto split;
5772 		if (vma->vm_ops->huge_fault) {
5773 			ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5774 			if (!(ret & VM_FAULT_FALLBACK))
5775 				return ret;
5776 		}
5777 	}
5778 
5779 split:
5780 	/* COW or write-notify handled on pte level: split pmd. */
5781 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5782 
5783 	return VM_FAULT_FALLBACK;
5784 }
5785 
5786 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
5787 {
5788 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
5789 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5790 	struct vm_area_struct *vma = vmf->vma;
5791 	/* No support for anonymous transparent PUD pages yet */
5792 	if (vma_is_anonymous(vma))
5793 		return VM_FAULT_FALLBACK;
5794 	/* See comment in create_huge_pmd. */
5795 	if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5796 		return VM_FAULT_FALLBACK;
5797 	if (vma->vm_ops->huge_fault)
5798 		return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5799 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5800 	return VM_FAULT_FALLBACK;
5801 }
5802 
5803 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5804 {
5805 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
5806 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5807 	struct vm_area_struct *vma = vmf->vma;
5808 	vm_fault_t ret;
5809 
5810 	/* No support for anonymous transparent PUD pages yet */
5811 	if (vma_is_anonymous(vma))
5812 		goto split;
5813 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5814 		/* See comment in create_huge_pmd. */
5815 		if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5816 			goto split;
5817 		if (vma->vm_ops->huge_fault) {
5818 			ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5819 			if (!(ret & VM_FAULT_FALLBACK))
5820 				return ret;
5821 		}
5822 	}
5823 split:
5824 	/* COW or write-notify not handled on PUD level: split pud. */
5825 	__split_huge_pud(vma, vmf->pud, vmf->address);
5826 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5827 	return VM_FAULT_FALLBACK;
5828 }
5829 
5830 /*
5831  * These routines also need to handle stuff like marking pages dirty
5832  * and/or accessed for architectures that don't do it in hardware (most
5833  * RISC architectures).  The early dirtying is also good on the i386.
5834  *
5835  * There is also a hook called "update_mmu_cache()" that architectures
5836  * with external mmu caches can use to update those (ie the Sparc or
5837  * PowerPC hashed page tables that act as extended TLBs).
5838  *
5839  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
5840  * concurrent faults).
5841  *
5842  * The mmap_lock may have been released depending on flags and our return value.
5843  * See filemap_fault() and __folio_lock_or_retry().
5844  */
5845 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
5846 {
5847 	pte_t entry;
5848 
5849 	if (unlikely(pmd_none(*vmf->pmd))) {
5850 		/*
5851 		 * Leave __pte_alloc() until later: because vm_ops->fault may
5852 		 * want to allocate huge page, and if we expose page table
5853 		 * for an instant, it will be difficult to retract from
5854 		 * concurrent faults and from rmap lookups.
5855 		 */
5856 		vmf->pte = NULL;
5857 		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
5858 	} else {
5859 		pmd_t dummy_pmdval;
5860 
5861 		/*
5862 		 * A regular pmd is established and it can't morph into a huge
5863 		 * pmd by anon khugepaged, since that takes mmap_lock in write
5864 		 * mode; but shmem or file collapse to THP could still morph
5865 		 * it into a huge pmd: just retry later if so.
5866 		 *
5867 		 * Use the maywrite version to indicate that vmf->pte may be
5868 		 * modified, but since we will use pte_same() to detect the
5869 		 * change of the !pte_none() entry, there is no need to recheck
5870 		 * the pmdval. Here we choose to pass a dummy variable instead
5871 		 * of NULL, which helps new users think about why this place is
5872 		 * special.
5873 		 */
5874 		vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
5875 						    vmf->address, &dummy_pmdval,
5876 						    &vmf->ptl);
5877 		if (unlikely(!vmf->pte))
5878 			return 0;
5879 		vmf->orig_pte = ptep_get_lockless(vmf->pte);
5880 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
5881 
5882 		if (pte_none(vmf->orig_pte)) {
5883 			pte_unmap(vmf->pte);
5884 			vmf->pte = NULL;
5885 		}
5886 	}
5887 
5888 	if (!vmf->pte)
5889 		return do_pte_missing(vmf);
5890 
5891 	if (!pte_present(vmf->orig_pte))
5892 		return do_swap_page(vmf);
5893 
5894 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5895 		return do_numa_page(vmf);
5896 
5897 	spin_lock(vmf->ptl);
5898 	entry = vmf->orig_pte;
5899 	if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
5900 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
5901 		goto unlock;
5902 	}
5903 	if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5904 		if (!pte_write(entry))
5905 			return do_wp_page(vmf);
5906 		else if (likely(vmf->flags & FAULT_FLAG_WRITE))
5907 			entry = pte_mkdirty(entry);
5908 	}
5909 	entry = pte_mkyoung(entry);
5910 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5911 				vmf->flags & FAULT_FLAG_WRITE)) {
5912 		update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5913 				vmf->pte, 1);
5914 	} else {
5915 		/* Skip spurious TLB flush for retried page fault */
5916 		if (vmf->flags & FAULT_FLAG_TRIED)
5917 			goto unlock;
5918 		/*
5919 		 * This is needed only for protection faults but the arch code
5920 		 * is not yet telling us if this is a protection fault or not.
5921 		 * This still avoids useless tlb flushes for .text page faults
5922 		 * with threads.
5923 		 */
5924 		if (vmf->flags & FAULT_FLAG_WRITE)
5925 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5926 						     vmf->pte);
5927 	}
5928 unlock:
5929 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5930 	return 0;
5931 }
5932 
5933 /*
5934  * On entry, we hold either the VMA lock or the mmap_lock
5935  * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in
5936  * the result, the mmap_lock is not held on exit.  See filemap_fault()
5937  * and __folio_lock_or_retry().
5938  */
5939 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5940 		unsigned long address, unsigned int flags)
5941 {
5942 	struct vm_fault vmf = {
5943 		.vma = vma,
5944 		.address = address & PAGE_MASK,
5945 		.real_address = address,
5946 		.flags = flags,
5947 		.pgoff = linear_page_index(vma, address),
5948 		.gfp_mask = __get_fault_gfp_mask(vma),
5949 	};
5950 	struct mm_struct *mm = vma->vm_mm;
5951 	unsigned long vm_flags = vma->vm_flags;
5952 	pgd_t *pgd;
5953 	p4d_t *p4d;
5954 	vm_fault_t ret;
5955 
5956 	pgd = pgd_offset(mm, address);
5957 	p4d = p4d_alloc(mm, pgd, address);
5958 	if (!p4d)
5959 		return VM_FAULT_OOM;
5960 
5961 	vmf.pud = pud_alloc(mm, p4d, address);
5962 	if (!vmf.pud)
5963 		return VM_FAULT_OOM;
5964 retry_pud:
5965 	if (pud_none(*vmf.pud) &&
5966 	    thp_vma_allowable_order(vma, vm_flags,
5967 				TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
5968 		ret = create_huge_pud(&vmf);
5969 		if (!(ret & VM_FAULT_FALLBACK))
5970 			return ret;
5971 	} else {
5972 		pud_t orig_pud = *vmf.pud;
5973 
5974 		barrier();
5975 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5976 
5977 			/*
5978 			 * TODO once we support anonymous PUDs: NUMA case and
5979 			 * FAULT_FLAG_UNSHARE handling.
5980 			 */
5981 			if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
5982 				ret = wp_huge_pud(&vmf, orig_pud);
5983 				if (!(ret & VM_FAULT_FALLBACK))
5984 					return ret;
5985 			} else {
5986 				huge_pud_set_accessed(&vmf, orig_pud);
5987 				return 0;
5988 			}
5989 		}
5990 	}
5991 
5992 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5993 	if (!vmf.pmd)
5994 		return VM_FAULT_OOM;
5995 
5996 	/* Huge pud page fault raced with pmd_alloc? */
5997 	if (pud_trans_unstable(vmf.pud))
5998 		goto retry_pud;
5999 
6000 	if (pmd_none(*vmf.pmd) &&
6001 	    thp_vma_allowable_order(vma, vm_flags,
6002 				TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
6003 		ret = create_huge_pmd(&vmf);
6004 		if (!(ret & VM_FAULT_FALLBACK))
6005 			return ret;
6006 	} else {
6007 		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
6008 
6009 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
6010 			VM_BUG_ON(thp_migration_supported() &&
6011 					  !is_pmd_migration_entry(vmf.orig_pmd));
6012 			if (is_pmd_migration_entry(vmf.orig_pmd))
6013 				pmd_migration_entry_wait(mm, vmf.pmd);
6014 			return 0;
6015 		}
6016 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
6017 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
6018 				return do_huge_pmd_numa_page(&vmf);
6019 
6020 			if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6021 			    !pmd_write(vmf.orig_pmd)) {
6022 				ret = wp_huge_pmd(&vmf);
6023 				if (!(ret & VM_FAULT_FALLBACK))
6024 					return ret;
6025 			} else {
6026 				huge_pmd_set_accessed(&vmf);
6027 				return 0;
6028 			}
6029 		}
6030 	}
6031 
6032 	return handle_pte_fault(&vmf);
6033 }
6034 
6035 /**
6036  * mm_account_fault - Do page fault accounting
6037  * @mm: mm from which memcg should be extracted. It can be NULL.
6038  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
6039  *        of perf event counters, but we'll still do the per-task accounting
6040  *        for the task that triggered this page fault.
6041  * @address: the faulted address.
6042  * @flags: the fault flags.
6043  * @ret: the fault retcode.
6044  *
6045  * This will take care of most of the page fault accounting.  Meanwhile, it
6046  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
6047  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
6048  * still be in per-arch page fault handlers at the entry of page fault.
6049  */
6050 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
6051 				    unsigned long address, unsigned int flags,
6052 				    vm_fault_t ret)
6053 {
6054 	bool major;
6055 
6056 	/* Incomplete faults will be accounted upon completion. */
6057 	if (ret & VM_FAULT_RETRY)
6058 		return;
6059 
6060 	/*
6061 	 * To preserve the behavior of older kernels, PGFAULT counters record
6062 	 * both successful and failed faults, as opposed to perf counters,
6063 	 * which ignore failed cases.
6064 	 */
6065 	count_vm_event(PGFAULT);
6066 	count_memcg_event_mm(mm, PGFAULT);
6067 
6068 	/*
6069 	 * Do not account for unsuccessful faults (e.g. when the address wasn't
6070 	 * valid).  That includes arch_vma_access_permitted() failing before
6071 	 * reaching here. So this is not a "this many hardware page faults"
6072 	 * counter.  We should use the hw profiling for that.
6073 	 */
6074 	if (ret & VM_FAULT_ERROR)
6075 		return;
6076 
6077 	/*
6078 	 * We define the fault as a major fault when the final successful fault
6079 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
6080 	 * handle it immediately previously).
6081 	 */
6082 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
6083 
6084 	if (major)
6085 		current->maj_flt++;
6086 	else
6087 		current->min_flt++;
6088 
6089 	/*
6090 	 * If the fault is done for GUP, regs will be NULL.  We only do the
6091 	 * accounting for the per-thread fault counters of the task that triggered the
6092 	 * fault, and we skip the perf event updates.
6093 	 */
6094 	if (!regs)
6095 		return;
6096 
6097 	if (major)
6098 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
6099 	else
6100 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
6101 }
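
/*
 * A worked example of the classification above: a file-backed fault that
 * must wait for I/O typically returns VM_FAULT_RETRY on the first attempt
 * (nothing is accounted), is retried with FAULT_FLAG_TRIED set, and then
 * completes.  The completed fault counts as major (current->maj_flt and
 * PERF_COUNT_SW_PAGE_FAULTS_MAJ) even if the final return value lacks
 * VM_FAULT_MAJOR, because FAULT_FLAG_TRIED implies it could not be
 * handled immediately the first time around.
 */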
6102 
6103 #ifdef CONFIG_LRU_GEN
6104 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6105 {
6106 	/* the LRU algorithm only applies to accesses with recency */
6107 	current->in_lru_fault = vma_has_recency(vma);
6108 }
6109 
6110 static void lru_gen_exit_fault(void)
6111 {
6112 	current->in_lru_fault = false;
6113 }
6114 #else
6115 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6116 {
6117 }
6118 
6119 static void lru_gen_exit_fault(void)
6120 {
6121 }
6122 #endif /* CONFIG_LRU_GEN */
6123 
6124 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
6125 				       unsigned int *flags)
6126 {
6127 	if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
6128 		if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
6129 			return VM_FAULT_SIGSEGV;
6130 		/*
6131 		 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
6132 		 * just treat it like an ordinary read-fault otherwise.
6133 		 */
6134 		if (!is_cow_mapping(vma->vm_flags))
6135 			*flags &= ~FAULT_FLAG_UNSHARE;
6136 	} else if (*flags & FAULT_FLAG_WRITE) {
6137 		/* Write faults on read-only mappings are impossible ... */
6138 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
6139 			return VM_FAULT_SIGSEGV;
6140 		/* ... and FOLL_FORCE only applies to COW mappings. */
6141 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
6142 				 !is_cow_mapping(vma->vm_flags)))
6143 			return VM_FAULT_SIGSEGV;
6144 	}
6145 #ifdef CONFIG_PER_VMA_LOCK
6146 	/*
6147 	 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
6148 	 * the assumption that the lock is dropped on VM_FAULT_RETRY.
6149 	 */
6150 	if (WARN_ON_ONCE((*flags &
6151 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
6152 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
6153 		return VM_FAULT_SIGSEGV;
6154 #endif
6155 
6156 	return 0;
6157 }
6158 
6159 /*
6160  * By the time we get here, we already hold either the VMA lock or the
6161  * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
6162  *
6163  * The mmap_lock may have been released depending on flags and our
6164  * return value.  See filemap_fault() and __folio_lock_or_retry().
6165  */
6166 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6167 			   unsigned int flags, struct pt_regs *regs)
6168 {
6169 	/* If the fault handler drops the mmap_lock, vma may be freed */
6170 	struct mm_struct *mm = vma->vm_mm;
6171 	vm_fault_t ret;
6172 	bool is_droppable;
6173 
6174 	__set_current_state(TASK_RUNNING);
6175 
6176 	ret = sanitize_fault_flags(vma, &flags);
6177 	if (ret)
6178 		goto out;
6179 
6180 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6181 					    flags & FAULT_FLAG_INSTRUCTION,
6182 					    flags & FAULT_FLAG_REMOTE)) {
6183 		ret = VM_FAULT_SIGSEGV;
6184 		goto out;
6185 	}
6186 
6187 	is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
6188 
6189 	/*
6190 	 * Enable the memcg OOM handling for faults triggered in user
6191 	 * space.  Kernel faults are handled more gracefully.
6192 	 */
6193 	if (flags & FAULT_FLAG_USER)
6194 		mem_cgroup_enter_user_fault();
6195 
6196 	lru_gen_enter_fault(vma);
6197 
6198 	if (unlikely(is_vm_hugetlb_page(vma)))
6199 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6200 	else
6201 		ret = __handle_mm_fault(vma, address, flags);
6202 
6203 	/*
6204 	 * Warning: It is no longer safe to dereference vma-> after this point,
6205 	 * because mmap_lock might have been dropped by __handle_mm_fault(), so
6206 	 * vma might be destroyed from underneath us.
6207 	 */
6208 
6209 	lru_gen_exit_fault();
6210 
6211 	/* If the mapping is droppable, then errors due to OOM aren't fatal. */
6212 	if (is_droppable)
6213 		ret &= ~VM_FAULT_OOM;
6214 
6215 	if (flags & FAULT_FLAG_USER) {
6216 		mem_cgroup_exit_user_fault();
6217 		/*
6218 		 * The task may have entered a memcg OOM situation but
6219 		 * if the allocation error was handled gracefully (no
6220 		 * VM_FAULT_OOM), there is no need to kill anything.
6221 		 * Just clean up the OOM state peacefully.
6222 		 */
6223 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6224 			mem_cgroup_oom_synchronize(false);
6225 	}
6226 out:
6227 	mm_account_fault(mm, regs, address, flags, ret);
6228 
6229 	return ret;
6230 }
6231 EXPORT_SYMBOL_GPL(handle_mm_fault);
6232 
6233 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA
6234 #include <linux/extable.h>
6235 
6236 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6237 {
6238 	if (likely(mmap_read_trylock(mm)))
6239 		return true;
6240 
6241 	if (regs && !user_mode(regs)) {
6242 		unsigned long ip = exception_ip(regs);
6243 		if (!search_exception_tables(ip))
6244 			return false;
6245 	}
6246 
6247 	return !mmap_read_lock_killable(mm);
6248 }
6249 
6250 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
6251 {
6252 	/*
6253 	 * We don't have this operation yet.
6254 	 *
6255 	 * It should be easy enough to do: it's basically an
6256 	 *    atomic_long_try_cmpxchg_acquire()
6257 	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
6258 	 * it also needs the proper lockdep magic etc.
6259 	 */
6260 	return false;
6261 }
6262 
6263 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6264 {
6265 	mmap_read_unlock(mm);
6266 	if (regs && !user_mode(regs)) {
6267 		unsigned long ip = exception_ip(regs);
6268 		if (!search_exception_tables(ip))
6269 			return false;
6270 	}
6271 	return !mmap_write_lock_killable(mm);
6272 }
6273 
6274 /*
6275  * Helper for page fault handling.
6276  *
6277  * This is kind of equivalent to "mmap_read_lock()" followed
6278  * by "find_extend_vma()", except it's a lot more careful about
6279  * the locking (and will drop the lock on failure).
6280  *
6281  * For example, if we have a kernel bug that causes a page
6282  * fault, we don't want to just use mmap_read_lock() to get
6283  * the mm lock, because that would deadlock if the bug were
6284  * to happen while we're holding the mm lock for writing.
6285  *
6286  * So this checks the exception tables on kernel faults in
6287  * order to only do this all for instructions that are actually
6288  * expected to fault.
6289  *
6290  * We can also actually take the mm lock for writing if we
6291  * need to extend the vma, which helps the VM layer a lot.
6292  */
6293 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
6294 			unsigned long addr, struct pt_regs *regs)
6295 {
6296 	struct vm_area_struct *vma;
6297 
6298 	if (!get_mmap_lock_carefully(mm, regs))
6299 		return NULL;
6300 
6301 	vma = find_vma(mm, addr);
6302 	if (likely(vma && (vma->vm_start <= addr)))
6303 		return vma;
6304 
6305 	/*
6306 	 * Well, dang. We might still be successful, but only
6307 	 * if we can extend a vma to do so.
6308 	 */
6309 	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
6310 		mmap_read_unlock(mm);
6311 		return NULL;
6312 	}
6313 
6314 	/*
6315 	 * We can try to upgrade the mmap lock atomically,
6316 	 * in which case we can continue to use the vma
6317 	 * we already looked up.
6318 	 *
6319 	 * Otherwise we'll have to drop the mmap lock and
6320 	 * re-take it, and also look up the vma again,
6321 	 * re-checking it.
6322 	 */
6323 	if (!mmap_upgrade_trylock(mm)) {
6324 		if (!upgrade_mmap_lock_carefully(mm, regs))
6325 			return NULL;
6326 
6327 		vma = find_vma(mm, addr);
6328 		if (!vma)
6329 			goto fail;
6330 		if (vma->vm_start <= addr)
6331 			goto success;
6332 		if (!(vma->vm_flags & VM_GROWSDOWN))
6333 			goto fail;
6334 	}
6335 
6336 	if (expand_stack_locked(vma, addr))
6337 		goto fail;
6338 
6339 success:
6340 	mmap_write_downgrade(mm);
6341 	return vma;
6342 
6343 fail:
6344 	mmap_write_unlock(mm);
6345 	return NULL;
6346 }
6347 #endif
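
/*
 * Illustrative sketch, not part of this file: the shape of an arch page
 * fault handler built on lock_mm_and_find_vma() (available under
 * CONFIG_LOCK_MM_AND_FIND_VMA) and handle_mm_fault().  The function name
 * is hypothetical, and signal delivery, fatal-signal checks and
 * VM_FAULT_ERROR handling are elided; see the real arch/<arch>/mm/fault.c
 * handlers for the full picture.
 */
static void example_arch_do_page_fault(struct pt_regs *regs,
				       unsigned long address,
				       unsigned int flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;

retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		return;		/* lock already dropped: bad-area handling */

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & VM_FAULT_COMPLETED)
		return;		/* mmap_lock was dropped for us */
	if (fault & VM_FAULT_RETRY) {
		/* mmap_lock was dropped; retry once, allowing blocking */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	mmap_read_unlock(mm);
}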
6348 
6349 #ifdef CONFIG_PER_VMA_LOCK
6350 /*
6351  * Look up and lock a VMA under RCU protection. The returned VMA is guaranteed
6352  * to be stable and not isolated. If the VMA is not found or is being modified,
6353  * the function returns NULL.
6354  */
6355 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
6356 					  unsigned long address)
6357 {
6358 	MA_STATE(mas, &mm->mm_mt, address, address);
6359 	struct vm_area_struct *vma;
6360 
6361 	rcu_read_lock();
6362 retry:
6363 	vma = mas_walk(&mas);
6364 	if (!vma)
6365 		goto inval;
6366 
6367 	if (!vma_start_read(vma))
6368 		goto inval;
6369 
6370 	/* Check if the VMA got isolated after we found it */
6371 	if (vma->detached) {
6372 		vma_end_read(vma);
6373 		count_vm_vma_lock_event(VMA_LOCK_MISS);
6374 		/* The area was replaced with another one */
6375 		goto retry;
6376 	}
6377 	/*
6378 	 * At this point, we have a stable reference to a VMA: The VMA is
6379 	 * locked and we know it hasn't already been isolated.
6380 	 * From here on, we can access the VMA without worrying about which
6381 	 * fields are accessible for RCU readers.
6382 	 */
6383 
6384 	/* Check since vm_start/vm_end might change before we lock the VMA */
6385 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6386 		goto inval_end_read;
6387 
6388 	rcu_read_unlock();
6389 	return vma;
6390 
6391 inval_end_read:
6392 	vma_end_read(vma);
6393 inval:
6394 	rcu_read_unlock();
6395 	count_vm_vma_lock_event(VMA_LOCK_ABORT);
6396 	return NULL;
6397 }
6398 #endif /* CONFIG_PER_VMA_LOCK */
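
/*
 * Illustrative sketch, not part of this file: how a fault path can try the
 * per-VMA lock first and fall back to the mmap_lock path when that fails.
 * The function name is hypothetical; FAULT_FLAG_VMA_LOCK tells the fault
 * handler which lock it holds (see sanitize_fault_flags() above).
 */
static vm_fault_t example_vma_locked_fault(struct mm_struct *mm,
					   unsigned long address,
					   unsigned int flags,
					   struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return VM_FAULT_RETRY;	/* caller takes the mmap_lock path */

	fault = handle_mm_fault(vma, address,
				flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);	/* otherwise the lock is already gone */
	return fault;
}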
6399 
6400 #ifndef __PAGETABLE_P4D_FOLDED
6401 /*
6402  * Allocate p4d page table.
6403  * We've already handled the fast-path in-line.
6404  */
6405 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6406 {
6407 	p4d_t *new = p4d_alloc_one(mm, address);
6408 	if (!new)
6409 		return -ENOMEM;
6410 
6411 	spin_lock(&mm->page_table_lock);
6412 	if (pgd_present(*pgd)) {	/* Another has populated it */
6413 		p4d_free(mm, new);
6414 	} else {
6415 		smp_wmb(); /* See comment in pmd_install() */
6416 		pgd_populate(mm, pgd, new);
6417 	}
6418 	spin_unlock(&mm->page_table_lock);
6419 	return 0;
6420 }
6421 #endif /* __PAGETABLE_P4D_FOLDED */
6422 
6423 #ifndef __PAGETABLE_PUD_FOLDED
6424 /*
6425  * Allocate page upper directory.
6426  * We've already handled the fast-path in-line.
6427  */
6428 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6429 {
6430 	pud_t *new = pud_alloc_one(mm, address);
6431 	if (!new)
6432 		return -ENOMEM;
6433 
6434 	spin_lock(&mm->page_table_lock);
6435 	if (!p4d_present(*p4d)) {
6436 		mm_inc_nr_puds(mm);
6437 		smp_wmb(); /* See comment in pmd_install() */
6438 		p4d_populate(mm, p4d, new);
6439 	} else	/* Another has populated it */
6440 		pud_free(mm, new);
6441 	spin_unlock(&mm->page_table_lock);
6442 	return 0;
6443 }
6444 #endif /* __PAGETABLE_PUD_FOLDED */
6445 
6446 #ifndef __PAGETABLE_PMD_FOLDED
6447 /*
6448  * Allocate page middle directory.
6449  * We've already handled the fast-path in-line.
6450  */
6451 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6452 {
6453 	spinlock_t *ptl;
6454 	pmd_t *new = pmd_alloc_one(mm, address);
6455 	if (!new)
6456 		return -ENOMEM;
6457 
6458 	ptl = pud_lock(mm, pud);
6459 	if (!pud_present(*pud)) {
6460 		mm_inc_nr_pmds(mm);
6461 		smp_wmb(); /* See comment in pmd_install() */
6462 		pud_populate(mm, pud, new);
6463 	} else {	/* Another has populated it */
6464 		pmd_free(mm, new);
6465 	}
6466 	spin_unlock(ptl);
6467 	return 0;
6468 }
6469 #endif /* __PAGETABLE_PMD_FOLDED */
6470 
6471 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
6472 				     spinlock_t *lock, pte_t *ptep,
6473 				     pgprot_t pgprot, unsigned long pfn_base,
6474 				     unsigned long addr_mask, bool writable,
6475 				     bool special)
6476 {
6477 	args->lock = lock;
6478 	args->ptep = ptep;
6479 	args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
6480 	args->pgprot = pgprot;
6481 	args->writable = writable;
6482 	args->special = special;
6483 }
6484 
6485 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
6486 {
6487 #ifdef CONFIG_LOCKDEP
6488 	struct file *file = vma->vm_file;
6489 	struct address_space *mapping = file ? file->f_mapping : NULL;
6490 
6491 	if (mapping)
6492 		lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
6493 			       lockdep_is_held(&vma->vm_mm->mmap_lock));
6494 	else
6495 		lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
6496 #endif
6497 }
6498 
6499 /**
6500  * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
6501  * @args: Pointer to struct @follow_pfnmap_args
6502  *
6503  * The caller needs to set up args->vma and args->address to point to the
6504  * virtual address as the target of such lookup.  On a successful return,
6505  * the results will be put into other output fields.
6506  *
6507  * After the caller has finished using the fields, it must invoke
6508  * follow_pfnmap_end() to properly release the locks and resources
6509  * of such a lookup request.
6510  *
6511  * Between the start() and end() calls, the results in @args remain valid
6512  * as the proper locks are held.  After end() is called, all the fields
6513  * in @follow_pfnmap_args become invalid and must not be accessed.  Further
6514  * use of such information after end() requires proper synchronization
6515  * by the caller with page table updates, otherwise it can create a
6516  * security bug.
6517  *
6518  * If the PTE maps a refcounted page, callers are responsible to protect
6519  * against invalidation with MMU notifiers; otherwise access to the PFN at
6520  * a later point in time can trigger use-after-free.
6521  *
6522  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
6523  * should be taken for read, and it cannot be released
6524  * before end() is invoked.
6525  *
6526  * This function must not be used to modify PTE content.
6527  *
6528  * Return: zero on success, negative otherwise.
6529  */
6530 int follow_pfnmap_start(struct follow_pfnmap_args *args)
6531 {
6532 	struct vm_area_struct *vma = args->vma;
6533 	unsigned long address = args->address;
6534 	struct mm_struct *mm = vma->vm_mm;
6535 	spinlock_t *lock;
6536 	pgd_t *pgdp;
6537 	p4d_t *p4dp, p4d;
6538 	pud_t *pudp, pud;
6539 	pmd_t *pmdp, pmd;
6540 	pte_t *ptep, pte;
6541 
6542 	pfnmap_lockdep_assert(vma);
6543 
6544 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6545 		goto out;
6546 
6547 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6548 		goto out;
6549 retry:
6550 	pgdp = pgd_offset(mm, address);
6551 	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
6552 		goto out;
6553 
6554 	p4dp = p4d_offset(pgdp, address);
6555 	p4d = READ_ONCE(*p4dp);
6556 	if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
6557 		goto out;
6558 
6559 	pudp = pud_offset(p4dp, address);
6560 	pud = READ_ONCE(*pudp);
6561 	if (pud_none(pud))
6562 		goto out;
6563 	if (pud_leaf(pud)) {
6564 		lock = pud_lock(mm, pudp);
6565 		if (!unlikely(pud_leaf(pud))) {
6566 			spin_unlock(lock);
6567 			goto retry;
6568 		}
6569 		pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
6570 				  pud_pfn(pud), PUD_MASK, pud_write(pud),
6571 				  pud_special(pud));
6572 		return 0;
6573 	}
6574 
6575 	pmdp = pmd_offset(pudp, address);
6576 	pmd = pmdp_get_lockless(pmdp);
6577 	if (pmd_leaf(pmd)) {
6578 		lock = pmd_lock(mm, pmdp);
6579 		if (!unlikely(pmd_leaf(pmd))) {
6580 			spin_unlock(lock);
6581 			goto retry;
6582 		}
6583 		pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
6584 				  pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
6585 				  pmd_special(pmd));
6586 		return 0;
6587 	}
6588 
6589 	ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
6590 	if (!ptep)
6591 		goto out;
6592 	pte = ptep_get(ptep);
6593 	if (!pte_present(pte))
6594 		goto unlock;
6595 	pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
6596 			  pte_pfn(pte), PAGE_MASK, pte_write(pte),
6597 			  pte_special(pte));
6598 	return 0;
6599 unlock:
6600 	pte_unmap_unlock(ptep, lock);
6601 out:
6602 	return -EINVAL;
6603 }
6604 EXPORT_SYMBOL_GPL(follow_pfnmap_start);
6605 
6606 /**
6607  * follow_pfnmap_end(): End a follow_pfnmap_start() process
6608  * @args: Pointer to struct @follow_pfnmap_args
6609  *
6610  * Must be used in pair with follow_pfnmap_start().  See the start() function
6611  * above for more information.
6612  */
6613 void follow_pfnmap_end(struct follow_pfnmap_args *args)
6614 {
6615 	if (args->lock)
6616 		spin_unlock(args->lock);
6617 	if (args->ptep)
6618 		pte_unmap(args->ptep);
6619 }
6620 EXPORT_SYMBOL_GPL(follow_pfnmap_end);
6621 
6622 #ifdef CONFIG_HAVE_IOREMAP_PROT
6623 /**
6624  * generic_access_phys - generic implementation for iomem mmap access
6625  * @vma: the vma to access
6626  * @addr: userspace address, not relative offset within @vma
6627  * @buf: buffer to read/write
6628  * @len: length of transfer
6629  * @write: set to FOLL_WRITE when writing, otherwise reading
6630  *
6631  * This is a generic implementation for &vm_operations_struct.access for an
6632  * iomem mapping. This callback is used by access_process_vm() when the @vma is
6633  * not page based.
6634  */
6635 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6636 			void *buf, int len, int write)
6637 {
6638 	resource_size_t phys_addr;
6639 	unsigned long prot = 0;
6640 	void __iomem *maddr;
6641 	int offset = offset_in_page(addr);
6642 	int ret = -EINVAL;
6643 	bool writable;
6644 	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
6645 
6646 retry:
6647 	if (follow_pfnmap_start(&args))
6648 		return -EINVAL;
6649 	prot = pgprot_val(args.pgprot);
6650 	phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
6651 	writable = args.writable;
6652 	follow_pfnmap_end(&args);
6653 
6654 	if ((write & FOLL_WRITE) && !writable)
6655 		return -EINVAL;
6656 
6657 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6658 	if (!maddr)
6659 		return -ENOMEM;
6660 
6661 	if (follow_pfnmap_start(&args))
6662 		goto out_unmap;
6663 
6664 	if ((prot != pgprot_val(args.pgprot)) ||
6665 	    (phys_addr != (args.pfn << PAGE_SHIFT)) ||
6666 	    (writable != args.writable)) {
6667 		follow_pfnmap_end(&args);
6668 		iounmap(maddr);
6669 		goto retry;
6670 	}
6671 
6672 	if (write)
6673 		memcpy_toio(maddr + offset, buf, len);
6674 	else
6675 		memcpy_fromio(buf, maddr + offset, len);
6676 	ret = len;
6677 	follow_pfnmap_end(&args);
6678 out_unmap:
6679 	iounmap(maddr);
6680 
6681 	return ret;
6682 }
6683 EXPORT_SYMBOL_GPL(generic_access_phys);
6684 #endif
6685 
6686 /*
6687  * Access another process' address space as given in mm.
6688  */
6689 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
6690 			      void *buf, int len, unsigned int gup_flags)
6691 {
6692 	void *old_buf = buf;
6693 	int write = gup_flags & FOLL_WRITE;
6694 
6695 	if (mmap_read_lock_killable(mm))
6696 		return 0;
6697 
6698 	/* Untag the address before looking up the VMA */
6699 	addr = untagged_addr_remote(mm, addr);
6700 
6701 	/* Avoid triggering the temporary warning in __get_user_pages */
6702 	if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
6703 		return 0;
6704 
6705 	/* ignore errors, just check how much was successfully transferred */
6706 	while (len) {
6707 		int bytes, offset;
6708 		void *maddr;
6709 		struct vm_area_struct *vma = NULL;
6710 		struct page *page = get_user_page_vma_remote(mm, addr,
6711 							     gup_flags, &vma);
6712 
6713 		if (IS_ERR(page)) {
6714 			/* We might need to expand the stack to access it */
6715 			vma = vma_lookup(mm, addr);
6716 			if (!vma) {
6717 				vma = expand_stack(mm, addr);
6718 
6719 				/* mmap_lock was dropped on failure */
6720 				if (!vma)
6721 					return buf - old_buf;
6722 
6723 				/* Try again if stack expansion worked */
6724 				continue;
6725 			}
6726 
6727 			/*
6728 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
6729 			 * we can access using slightly different code.
6730 			 */
6731 			bytes = 0;
6732 #ifdef CONFIG_HAVE_IOREMAP_PROT
6733 			if (vma->vm_ops && vma->vm_ops->access)
6734 				bytes = vma->vm_ops->access(vma, addr, buf,
6735 							    len, write);
6736 #endif
6737 			if (bytes <= 0)
6738 				break;
6739 		} else {
6740 			bytes = len;
6741 			offset = addr & (PAGE_SIZE-1);
6742 			if (bytes > PAGE_SIZE-offset)
6743 				bytes = PAGE_SIZE-offset;
6744 
6745 			maddr = kmap_local_page(page);
6746 			if (write) {
6747 				copy_to_user_page(vma, page, addr,
6748 						  maddr + offset, buf, bytes);
6749 				set_page_dirty_lock(page);
6750 			} else {
6751 				copy_from_user_page(vma, page, addr,
6752 						    buf, maddr + offset, bytes);
6753 			}
6754 			unmap_and_put_page(page, maddr);
6755 		}
6756 		len -= bytes;
6757 		buf += bytes;
6758 		addr += bytes;
6759 	}
6760 	mmap_read_unlock(mm);
6761 
6762 	return buf - old_buf;
6763 }
6764 
6765 /**
6766  * access_remote_vm - access another process' address space
6767  * @mm:		the mm_struct of the target address space
6768  * @addr:	start address to access
6769  * @buf:	source or destination buffer
6770  * @len:	number of bytes to transfer
6771  * @gup_flags:	flags modifying lookup behaviour
6772  *
6773  * The caller must hold a reference on @mm.
6774  *
6775  * Return: number of bytes copied from source to destination.
6776  */
6777 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6778 		void *buf, int len, unsigned int gup_flags)
6779 {
6780 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
6781 }
6782 
6783 /*
6784  * Access another process' address space.
6785  * Source/target buffer must be in kernel space.
6786  * Do not walk the page table directly; use get_user_pages().
6787  */
6788 int access_process_vm(struct task_struct *tsk, unsigned long addr,
6789 		void *buf, int len, unsigned int gup_flags)
6790 {
6791 	struct mm_struct *mm;
6792 	int ret;
6793 
6794 	mm = get_task_mm(tsk);
6795 	if (!mm)
6796 		return 0;
6797 
6798 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
6799 
6800 	mmput(mm);
6801 
6802 	return ret;
6803 }
6804 EXPORT_SYMBOL_GPL(access_process_vm);
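
/*
 * Illustrative sketch, not part of this file: reading one word from
 * another task's address space, in the style of ptrace(PTRACE_PEEKDATA).
 * The helper name is hypothetical; FOLL_FORCE lets a debugger read
 * through protections the way ptrace does.
 */
static int example_peek_word(struct task_struct *tsk, unsigned long addr,
			     unsigned long *val)
{
	int copied = access_process_vm(tsk, addr, val, sizeof(*val),
				       FOLL_FORCE);

	return copied == sizeof(*val) ? 0 : -EIO;
}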
6805 
6806 /*
6807  * Print the name of a VMA.
6808  */
6809 void print_vma_addr(char *prefix, unsigned long ip)
6810 {
6811 	struct mm_struct *mm = current->mm;
6812 	struct vm_area_struct *vma;
6813 
6814 	/*
6815 	 * We might be running from an atomic context, so we cannot sleep.
6816 	 */
6817 	if (!mmap_read_trylock(mm))
6818 		return;
6819 
6820 	vma = vma_lookup(mm, ip);
6821 	if (vma && vma->vm_file) {
6822 		struct file *f = vma->vm_file;
6823 		ip -= vma->vm_start;
6824 		ip += vma->vm_pgoff << PAGE_SHIFT;
6825 		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
6826 				vma->vm_start,
6827 				vma->vm_end - vma->vm_start);
6828 	}
6829 	mmap_read_unlock(mm);
6830 }
6831 
6832 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6833 void __might_fault(const char *file, int line)
6834 {
6835 	if (pagefault_disabled())
6836 		return;
6837 	__might_sleep(file, line);
6838 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6839 	if (current->mm)
6840 		might_lock_read(&current->mm->mmap_lock);
6841 #endif
6842 }
6843 EXPORT_SYMBOL(__might_fault);
6844 #endif
6845 
6846 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
6847 /*
6848  * Process all subpages of the specified huge page with the specified
6849  * operation.  The target subpage will be processed last to keep its
6850  * cache lines hot.
6851  */
6852 static inline int process_huge_page(
6853 	unsigned long addr_hint, unsigned int nr_pages,
6854 	int (*process_subpage)(unsigned long addr, int idx, void *arg),
6855 	void *arg)
6856 {
6857 	int i, n, base, l, ret;
6858 	unsigned long addr = addr_hint &
6859 		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
6860 
6861 	/* Process target subpage last to keep its cache lines hot */
6862 	might_sleep();
6863 	n = (addr_hint - addr) / PAGE_SIZE;
6864 	if (2 * n <= nr_pages) {
6865 		/* If the target subpage is in the first half of the huge page */
6866 		base = 0;
6867 		l = n;
6868 		/* Process subpages at the end of the huge page */
6869 		for (i = nr_pages - 1; i >= 2 * n; i--) {
6870 			cond_resched();
6871 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
6872 			if (ret)
6873 				return ret;
6874 		}
6875 	} else {
6876 		/* If the target subpage is in the second half of the huge page */
6877 		base = nr_pages - 2 * (nr_pages - n);
6878 		l = nr_pages - n;
6879 		/* Process subpages at the beginning of the huge page */
6880 		for (i = 0; i < base; i++) {
6881 			cond_resched();
6882 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
6883 			if (ret)
6884 				return ret;
6885 		}
6886 	}
6887 	/*
6888 	 * Process the remaining subpages in a left-right-left-right
6889 	 * pattern, converging towards the target subpage
6890 	 */
6891 	for (i = 0; i < l; i++) {
6892 		int left_idx = base + i;
6893 		int right_idx = base + 2 * l - 1 - i;
6894 
6895 		cond_resched();
6896 		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
6897 		if (ret)
6898 			return ret;
6899 		cond_resched();
6900 		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
6901 		if (ret)
6902 			return ret;
6903 	}
6904 	return 0;
6905 }
6906 
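/*
 * A worked trace of the ordering above (illustrative): with nr_pages == 8
 * and the target at index n == 2, the first loop handles indices 7, 6,
 * 5, 4 and the left-right loop then handles 0, 3, 1, 2, so the target
 * index 2 is processed last and its cache lines stay hot.
 */
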
6907 static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
6908 				unsigned int nr_pages)
6909 {
6910 	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
6911 	int i;
6912 
6913 	might_sleep();
6914 	for (i = 0; i < nr_pages; i++) {
6915 		cond_resched();
6916 		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
6917 	}
6918 }
6919 
6920 static int clear_subpage(unsigned long addr, int idx, void *arg)
6921 {
6922 	struct folio *folio = arg;
6923 
6924 	clear_user_highpage(folio_page(folio, idx), addr);
6925 	return 0;
6926 }
6927 
6928 /**
6929  * folio_zero_user - Zero a folio which will be mapped to userspace.
6930  * @folio: The folio to zero.
6931  * @addr_hint: The address that will be accessed, or the base address if unclear.
6932  */
6933 void folio_zero_user(struct folio *folio, unsigned long addr_hint)
6934 {
6935 	unsigned int nr_pages = folio_nr_pages(folio);
6936 
6937 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
6938 		clear_gigantic_page(folio, addr_hint, nr_pages);
6939 	else
6940 		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
6941 }
6942 
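/*
 * Example usage (an illustrative sketch, modeled on the anonymous THP
 * fault path; the exact allocation call is an assumption):
 *
 *	folio = vma_alloc_folio(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, haddr);
 *	folio_zero_user(folio, vmf->address);
 *
 * Passing the faulting address as @addr_hint means the subpage the task
 * is about to touch is zeroed last, keeping it hot in cache.
 */
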
6943 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
6944 				   unsigned long addr_hint,
6945 				   struct vm_area_struct *vma,
6946 				   unsigned int nr_pages)
6947 {
6948 	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
6949 	struct page *dst_page;
6950 	struct page *src_page;
6951 	int i;
6952 
6953 	for (i = 0; i < nr_pages; i++) {
6954 		dst_page = folio_page(dst, i);
6955 		src_page = folio_page(src, i);
6956 
6957 		cond_resched();
6958 		if (copy_mc_user_highpage(dst_page, src_page,
6959 					  addr + i*PAGE_SIZE, vma))
6960 			return -EHWPOISON;
6961 	}
6962 	return 0;
6963 }
6964 
6965 struct copy_subpage_arg {
6966 	struct folio *dst;
6967 	struct folio *src;
6968 	struct vm_area_struct *vma;
6969 };
6970 
6971 static int copy_subpage(unsigned long addr, int idx, void *arg)
6972 {
6973 	struct copy_subpage_arg *copy_arg = arg;
6974 	struct page *dst = folio_page(copy_arg->dst, idx);
6975 	struct page *src = folio_page(copy_arg->src, idx);
6976 
6977 	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
6978 		return -EHWPOISON;
6979 	return 0;
6980 }
6981 
6982 int copy_user_large_folio(struct folio *dst, struct folio *src,
6983 			  unsigned long addr_hint, struct vm_area_struct *vma)
6984 {
6985 	unsigned int nr_pages = folio_nr_pages(dst);
6986 	struct copy_subpage_arg arg = {
6987 		.dst = dst,
6988 		.src = src,
6989 		.vma = vma,
6990 	};
6991 
6992 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
6993 		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
6994 
6995 	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
6996 }
6997 
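/*
 * Example usage (an illustrative sketch, modeled on the hugetlb CoW
 * path): a non-zero return means the source was hit by a machine check
 * during the copy, and callers typically unwind and report the poison:
 *
 *	if (copy_user_large_folio(new_folio, old_folio, vmf->address, vma))
 *		return VM_FAULT_HWPOISON_LARGE;
 */
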
6998 long copy_folio_from_user(struct folio *dst_folio,
6999 			   const void __user *usr_src,
7000 			   bool allow_pagefault)
7001 {
7002 	void *kaddr;
7003 	unsigned long i, rc = 0;
7004 	unsigned int nr_pages = folio_nr_pages(dst_folio);
7005 	unsigned long ret_val = nr_pages * PAGE_SIZE;
7006 	struct page *subpage;
7007 
7008 	for (i = 0; i < nr_pages; i++) {
7009 		subpage = folio_page(dst_folio, i);
7010 		kaddr = kmap_local_page(subpage);
7011 		if (!allow_pagefault)
7012 			pagefault_disable();
7013 		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
7014 		if (!allow_pagefault)
7015 			pagefault_enable();
7016 		kunmap_local(kaddr);
7017 
7018 		ret_val -= (PAGE_SIZE - rc);
7019 		if (rc)
7020 			break;
7021 
7022 		flush_dcache_page(subpage);
7023 
7024 		cond_resched();
7025 	}
7026 	return ret_val;
7027 }
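
/*
 * Example usage (an illustrative sketch, modeled on the userfaultfd
 * UFFDIO_COPY path; "retry" is a hypothetical label): the return value
 * is the number of bytes left uncopied, so non-zero means the copy
 * faulted and, with @allow_pagefault false, the caller typically drops
 * its locks, faults the source range in, and retries:
 *
 *	if (copy_folio_from_user(folio, (const void __user *)src_addr,
 *				 false))
 *		goto retry;
 */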
7028 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
7029 
7030 #if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
7031 
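/*
 * With split PTE locks, each page-table page carries its own spinlock.
 * ALLOC_SPLIT_PTLOCKS is set when spinlock_t is too large to embed in
 * struct ptdesc directly (e.g. with lockdep enabled), in which case the
 * locks are allocated separately from the slab cache created below.
 */
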
7032 static struct kmem_cache *page_ptl_cachep;
7033 
7034 void __init ptlock_cache_init(void)
7035 {
7036 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
7037 			SLAB_PANIC, NULL);
7038 }
7039 
7040 bool ptlock_alloc(struct ptdesc *ptdesc)
7041 {
7042 	spinlock_t *ptl;
7043 
7044 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
7045 	if (!ptl)
7046 		return false;
7047 	ptdesc->ptl = ptl;
7048 	return true;
7049 }
7050 
7051 void ptlock_free(struct ptdesc *ptdesc)
7052 {
7053 	if (ptdesc->ptl)
7054 		kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
7055 }
7056 #endif
7057 
7058 void vma_pgtable_walk_begin(struct vm_area_struct *vma)
7059 {
7060 	if (is_vm_hugetlb_page(vma))
7061 		hugetlb_vma_lock_read(vma);
7062 }
7063 
7064 void vma_pgtable_walk_end(struct vm_area_struct *vma)
7065 {
7066 	if (is_vm_hugetlb_page(vma))
7067 		hugetlb_vma_unlock_read(vma);
7068 }
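
/*
 * Example usage (an illustrative sketch): page-table walkers bracket
 * the walk so that a hugetlb VMA is held against concurrent PMD
 * unsharing for its duration; for all other VMAs these calls are
 * no-ops:
 *
 *	vma_pgtable_walk_begin(vma);
 *	(walk the page tables)
 *	vma_pgtable_walk_end(vma);
 */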
7069