xref: /linux/mm/memory.c (revision 38c6104e0bc7c8af20ab4897cb0504e3339e4fe4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/memory.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * demand-loading started 01.12.91 - seems it is high on the list of
10  * things wanted, and it should be easy to implement. - Linus
11  */
12 
13 /*
14  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
15  * pages started 02.12.91, seems to work. - Linus.
16  *
17  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18  * would have taken more than the 6M I have free, but it worked well as
19  * far as I could see.
20  *
21  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22  */
23 
24 /*
25  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26  * thought has to go into this. Oh, well..
27  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28  *		Found it. Everything seems to work now.
29  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30  */
31 
32 /*
33  * 05.04.94  -  Multi-page memory management added for v1.1.
34  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35  *
36  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37  *		(Gerhard.Wichert@pdb.siemens.de)
38  *
39  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40  */
41 
42 #include <linux/kernel_stat.h>
43 #include <linux/mm.h>
44 #include <linux/mm_inline.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/numa_balancing.h>
47 #include <linux/sched/task.h>
48 #include <linux/hugetlb.h>
49 #include <linux/mman.h>
50 #include <linux/swap.h>
51 #include <linux/highmem.h>
52 #include <linux/pagemap.h>
53 #include <linux/memremap.h>
54 #include <linux/kmsan.h>
55 #include <linux/ksm.h>
56 #include <linux/rmap.h>
57 #include <linux/export.h>
58 #include <linux/delayacct.h>
59 #include <linux/init.h>
60 #include <linux/pfn_t.h>
61 #include <linux/writeback.h>
62 #include <linux/memcontrol.h>
63 #include <linux/mmu_notifier.h>
64 #include <linux/swapops.h>
65 #include <linux/elf.h>
66 #include <linux/gfp.h>
67 #include <linux/migrate.h>
68 #include <linux/string.h>
69 #include <linux/memory-tiers.h>
70 #include <linux/debugfs.h>
71 #include <linux/userfaultfd_k.h>
72 #include <linux/dax.h>
73 #include <linux/oom.h>
74 #include <linux/numa.h>
75 #include <linux/perf_event.h>
76 #include <linux/ptrace.h>
77 #include <linux/vmalloc.h>
78 #include <linux/sched/sysctl.h>
79 #include <linux/fsnotify.h>
80 
81 #include <trace/events/kmem.h>
82 
83 #include <asm/io.h>
84 #include <asm/mmu_context.h>
85 #include <asm/pgalloc.h>
86 #include <linux/uaccess.h>
87 #include <asm/tlb.h>
88 #include <asm/tlbflush.h>
89 
90 #include "pgalloc-track.h"
91 #include "internal.h"
92 #include "swap.h"
93 
94 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
95 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
96 #endif
97 
98 #ifndef CONFIG_NUMA
99 unsigned long max_mapnr;
100 EXPORT_SYMBOL(max_mapnr);
101 
102 struct page *mem_map;
103 EXPORT_SYMBOL(mem_map);
104 #endif
105 
106 static vm_fault_t do_fault(struct vm_fault *vmf);
107 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
108 static bool vmf_pte_changed(struct vm_fault *vmf);
109 
110 /*
111  * Return true if the original pte was a uffd-wp pte marker (so the pte was
112  * wr-protected).
113  */
114 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
115 {
116 	if (!userfaultfd_wp(vmf->vma))
117 		return false;
118 	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
119 		return false;
120 
121 	return pte_marker_uffd_wp(vmf->orig_pte);
122 }
123 
124 /*
125  * A number of key systems in x86 including ioremap() rely on the assumption
126  * that high_memory defines the upper bound on direct map memory, then end
127  * of ZONE_NORMAL.
128  */
129 void *high_memory;
130 EXPORT_SYMBOL(high_memory);
131 
132 /*
133  * Randomize the address space (stacks, mmaps, brk, etc.).
134  *
135  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
136  *   as ancient (libc5 based) binaries can segfault. )
137  */
138 int randomize_va_space __read_mostly =
139 #ifdef CONFIG_COMPAT_BRK
140 					1;
141 #else
142 					2;
143 #endif
144 
145 #ifndef arch_wants_old_prefaulted_pte
146 static inline bool arch_wants_old_prefaulted_pte(void)
147 {
148 	/*
149 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
150 	 * some architectures, even if it's performed in hardware. By
151 	 * default, "false" means prefaulted entries will be 'young'.
152 	 */
153 	return false;
154 }
155 #endif
156 
157 static int __init disable_randmaps(char *s)
158 {
159 	randomize_va_space = 0;
160 	return 1;
161 }
162 __setup("norandmaps", disable_randmaps);
163 
164 unsigned long zero_pfn __read_mostly;
165 EXPORT_SYMBOL(zero_pfn);
166 
167 unsigned long highest_memmap_pfn __read_mostly;
168 
169 /*
170  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
171  */
172 static int __init init_zero_pfn(void)
173 {
174 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
175 	return 0;
176 }
177 early_initcall(init_zero_pfn);
178 
179 void mm_trace_rss_stat(struct mm_struct *mm, int member)
180 {
181 	trace_rss_stat(mm, member);
182 }
183 
184 /*
185  * Note: this doesn't free the actual pages themselves. That
186  * has been handled earlier when unmapping all the memory regions.
187  */
188 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
189 			   unsigned long addr)
190 {
191 	pgtable_t token = pmd_pgtable(*pmd);
192 	pmd_clear(pmd);
193 	pte_free_tlb(tlb, token, addr);
194 	mm_dec_nr_ptes(tlb->mm);
195 }
196 
197 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
198 				unsigned long addr, unsigned long end,
199 				unsigned long floor, unsigned long ceiling)
200 {
201 	pmd_t *pmd;
202 	unsigned long next;
203 	unsigned long start;
204 
205 	start = addr;
206 	pmd = pmd_offset(pud, addr);
207 	do {
208 		next = pmd_addr_end(addr, end);
209 		if (pmd_none_or_clear_bad(pmd))
210 			continue;
211 		free_pte_range(tlb, pmd, addr);
212 	} while (pmd++, addr = next, addr != end);
213 
214 	start &= PUD_MASK;
215 	if (start < floor)
216 		return;
217 	if (ceiling) {
218 		ceiling &= PUD_MASK;
219 		if (!ceiling)
220 			return;
221 	}
222 	if (end - 1 > ceiling - 1)
223 		return;
224 
225 	pmd = pmd_offset(pud, start);
226 	pud_clear(pud);
227 	pmd_free_tlb(tlb, pmd, start);
228 	mm_dec_nr_pmds(tlb->mm);
229 }
230 
231 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
232 				unsigned long addr, unsigned long end,
233 				unsigned long floor, unsigned long ceiling)
234 {
235 	pud_t *pud;
236 	unsigned long next;
237 	unsigned long start;
238 
239 	start = addr;
240 	pud = pud_offset(p4d, addr);
241 	do {
242 		next = pud_addr_end(addr, end);
243 		if (pud_none_or_clear_bad(pud))
244 			continue;
245 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
246 	} while (pud++, addr = next, addr != end);
247 
248 	start &= P4D_MASK;
249 	if (start < floor)
250 		return;
251 	if (ceiling) {
252 		ceiling &= P4D_MASK;
253 		if (!ceiling)
254 			return;
255 	}
256 	if (end - 1 > ceiling - 1)
257 		return;
258 
259 	pud = pud_offset(p4d, start);
260 	p4d_clear(p4d);
261 	pud_free_tlb(tlb, pud, start);
262 	mm_dec_nr_puds(tlb->mm);
263 }
264 
265 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
266 				unsigned long addr, unsigned long end,
267 				unsigned long floor, unsigned long ceiling)
268 {
269 	p4d_t *p4d;
270 	unsigned long next;
271 	unsigned long start;
272 
273 	start = addr;
274 	p4d = p4d_offset(pgd, addr);
275 	do {
276 		next = p4d_addr_end(addr, end);
277 		if (p4d_none_or_clear_bad(p4d))
278 			continue;
279 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
280 	} while (p4d++, addr = next, addr != end);
281 
282 	start &= PGDIR_MASK;
283 	if (start < floor)
284 		return;
285 	if (ceiling) {
286 		ceiling &= PGDIR_MASK;
287 		if (!ceiling)
288 			return;
289 	}
290 	if (end - 1 > ceiling - 1)
291 		return;
292 
293 	p4d = p4d_offset(pgd, start);
294 	pgd_clear(pgd);
295 	p4d_free_tlb(tlb, p4d, start);
296 }
297 
298 /*
299  * This function frees user-level page tables of a process.
300  */
301 void free_pgd_range(struct mmu_gather *tlb,
302 			unsigned long addr, unsigned long end,
303 			unsigned long floor, unsigned long ceiling)
304 {
305 	pgd_t *pgd;
306 	unsigned long next;
307 
308 	/*
309 	 * The next few lines have given us lots of grief...
310 	 *
311 	 * Why are we testing PMD* at this top level?  Because often
312 	 * there will be no work to do at all, and we'd prefer not to
313 	 * go all the way down to the bottom just to discover that.
314 	 *
315 	 * Why all these "- 1"s?  Because 0 represents both the bottom
316 	 * of the address space and the top of it (using -1 for the
317 	 * top wouldn't help much: the masks would do the wrong thing).
318 	 * The rule is that addr 0 and floor 0 refer to the bottom of
319 	 * the address space, but end 0 and ceiling 0 refer to the top
320 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
321 	 * that end 0 case should be mythical).
322 	 *
323 	 * Wherever addr is brought up or ceiling brought down, we must
324 	 * be careful to reject "the opposite 0" before it confuses the
325 	 * subsequent tests.  But what about where end is brought down
326 	 * by PMD_SIZE below? no, end can't go down to 0 there.
327 	 *
328 	 * Whereas we round start (addr) and ceiling down, by different
329 	 * masks at different levels, in order to test whether a table
330 	 * now has no other vmas using it, so can be freed, we don't
331 	 * bother to round floor or end up - the tests don't need that.
332 	 */
333 
334 	addr &= PMD_MASK;
335 	if (addr < floor) {
336 		addr += PMD_SIZE;
337 		if (!addr)
338 			return;
339 	}
340 	if (ceiling) {
341 		ceiling &= PMD_MASK;
342 		if (!ceiling)
343 			return;
344 	}
345 	if (end - 1 > ceiling - 1)
346 		end -= PMD_SIZE;
347 	if (addr > end - 1)
348 		return;
349 	/*
350 	 * We add page table cache pages with PAGE_SIZE,
351 	 * (see pte_free_tlb()), flush the tlb if we need
352 	 */
353 	tlb_change_page_size(tlb, PAGE_SIZE);
354 	pgd = pgd_offset(tlb->mm, addr);
355 	do {
356 		next = pgd_addr_end(addr, end);
357 		if (pgd_none_or_clear_bad(pgd))
358 			continue;
359 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
360 	} while (pgd++, addr = next, addr != end);
361 }
362 
363 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
364 		   struct vm_area_struct *vma, unsigned long floor,
365 		   unsigned long ceiling, bool mm_wr_locked)
366 {
367 	struct unlink_vma_file_batch vb;
368 
369 	do {
370 		unsigned long addr = vma->vm_start;
371 		struct vm_area_struct *next;
372 
373 		/*
374 		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
375 		 * be 0.  This will underflow and is okay.
376 		 */
377 		next = mas_find(mas, ceiling - 1);
378 		if (unlikely(xa_is_zero(next)))
379 			next = NULL;
380 
381 		/*
382 		 * Hide vma from rmap and truncate_pagecache before freeing
383 		 * pgtables
384 		 */
385 		if (mm_wr_locked)
386 			vma_start_write(vma);
387 		unlink_anon_vmas(vma);
388 
389 		if (is_vm_hugetlb_page(vma)) {
390 			unlink_file_vma(vma);
391 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
392 				floor, next ? next->vm_start : ceiling);
393 		} else {
394 			unlink_file_vma_batch_init(&vb);
395 			unlink_file_vma_batch_add(&vb, vma);
396 
397 			/*
398 			 * Optimization: gather nearby vmas into one call down
399 			 */
400 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
401 			       && !is_vm_hugetlb_page(next)) {
402 				vma = next;
403 				next = mas_find(mas, ceiling - 1);
404 				if (unlikely(xa_is_zero(next)))
405 					next = NULL;
406 				if (mm_wr_locked)
407 					vma_start_write(vma);
408 				unlink_anon_vmas(vma);
409 				unlink_file_vma_batch_add(&vb, vma);
410 			}
411 			unlink_file_vma_batch_final(&vb);
412 			free_pgd_range(tlb, addr, vma->vm_end,
413 				floor, next ? next->vm_start : ceiling);
414 		}
415 		vma = next;
416 	} while (vma);
417 }
418 
419 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
420 {
421 	spinlock_t *ptl = pmd_lock(mm, pmd);
422 
423 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
424 		mm_inc_nr_ptes(mm);
425 		/*
426 		 * Ensure all pte setup (eg. pte page lock and page clearing) are
427 		 * visible before the pte is made visible to other CPUs by being
428 		 * put into page tables.
429 		 *
430 		 * The other side of the story is the pointer chasing in the page
431 		 * table walking code (when walking the page table without locking;
432 		 * ie. most of the time). Fortunately, these data accesses consist
433 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
434 		 * being the notable exception) will already guarantee loads are
435 		 * seen in-order. See the alpha page table accessors for the
436 		 * smp_rmb() barriers in page table walking code.
437 		 */
438 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
439 		pmd_populate(mm, pmd, *pte);
440 		*pte = NULL;
441 	}
442 	spin_unlock(ptl);
443 }
444 
445 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
446 {
447 	pgtable_t new = pte_alloc_one(mm);
448 	if (!new)
449 		return -ENOMEM;
450 
451 	pmd_install(mm, pmd, &new);
452 	if (new)
453 		pte_free(mm, new);
454 	return 0;
455 }
456 
457 int __pte_alloc_kernel(pmd_t *pmd)
458 {
459 	pte_t *new = pte_alloc_one_kernel(&init_mm);
460 	if (!new)
461 		return -ENOMEM;
462 
463 	spin_lock(&init_mm.page_table_lock);
464 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
465 		smp_wmb(); /* See comment in pmd_install() */
466 		pmd_populate_kernel(&init_mm, pmd, new);
467 		new = NULL;
468 	}
469 	spin_unlock(&init_mm.page_table_lock);
470 	if (new)
471 		pte_free_kernel(&init_mm, new);
472 	return 0;
473 }
474 
475 static inline void init_rss_vec(int *rss)
476 {
477 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
478 }
479 
480 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
481 {
482 	int i;
483 
484 	for (i = 0; i < NR_MM_COUNTERS; i++)
485 		if (rss[i])
486 			add_mm_counter(mm, i, rss[i]);
487 }
488 
489 /*
490  * This function is called to print an error when a bad pte
491  * is found. For example, we might have a PFN-mapped pte in
492  * a region that doesn't allow it.
493  *
494  * The calling function must still handle the error.
495  */
496 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
497 			  pte_t pte, struct page *page)
498 {
499 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
500 	p4d_t *p4d = p4d_offset(pgd, addr);
501 	pud_t *pud = pud_offset(p4d, addr);
502 	pmd_t *pmd = pmd_offset(pud, addr);
503 	struct address_space *mapping;
504 	pgoff_t index;
505 	static unsigned long resume;
506 	static unsigned long nr_shown;
507 	static unsigned long nr_unshown;
508 
509 	/*
510 	 * Allow a burst of 60 reports, then keep quiet for that minute;
511 	 * or allow a steady drip of one report per second.
512 	 */
513 	if (nr_shown == 60) {
514 		if (time_before(jiffies, resume)) {
515 			nr_unshown++;
516 			return;
517 		}
518 		if (nr_unshown) {
519 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
520 				 nr_unshown);
521 			nr_unshown = 0;
522 		}
523 		nr_shown = 0;
524 	}
525 	if (nr_shown++ == 0)
526 		resume = jiffies + 60 * HZ;
527 
528 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
529 	index = linear_page_index(vma, addr);
530 
531 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
532 		 current->comm,
533 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
534 	if (page)
535 		dump_page(page, "bad pte");
536 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
537 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
538 	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
539 		 vma->vm_file,
540 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
541 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
542 		 mapping ? mapping->a_ops->read_folio : NULL);
543 	dump_stack();
544 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
545 }
546 
547 /*
548  * vm_normal_page -- This function gets the "struct page" associated with a pte.
549  *
550  * "Special" mappings do not wish to be associated with a "struct page" (either
551  * it doesn't exist, or it exists but they don't want to touch it). In this
552  * case, NULL is returned here. "Normal" mappings do have a struct page.
553  *
554  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
555  * pte bit, in which case this function is trivial. Secondly, an architecture
556  * may not have a spare pte bit, which requires a more complicated scheme,
557  * described below.
558  *
559  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
560  * special mapping (even if there are underlying and valid "struct pages").
561  * COWed pages of a VM_PFNMAP are always normal.
562  *
563  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
564  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
565  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
566  * mapping will always honor the rule
567  *
568  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
569  *
570  * And for normal mappings this is false.
571  *
572  * This restricts such mappings to be a linear translation from virtual address
573  * to pfn. To get around this restriction, we allow arbitrary mappings so long
574  * as the vma is not a COW mapping; in that case, we know that all ptes are
575  * special (because none can have been COWed).
576  *
577  *
578  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
579  *
580  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
581  * page" backing, however the difference is that _all_ pages with a struct
582  * page (that is, those where pfn_valid is true) are refcounted and considered
583  * normal pages by the VM. The only exception are zeropages, which are
584  * *never* refcounted.
585  *
586  * The disadvantage is that pages are refcounted (which can be slower and
587  * simply not an option for some PFNMAP users). The advantage is that we
588  * don't have to follow the strict linearity rule of PFNMAP mappings in
589  * order to support COWable mappings.
590  *
591  */
592 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
593 			    pte_t pte)
594 {
595 	unsigned long pfn = pte_pfn(pte);
596 
597 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
598 		if (likely(!pte_special(pte)))
599 			goto check_pfn;
600 		if (vma->vm_ops && vma->vm_ops->find_special_page)
601 			return vma->vm_ops->find_special_page(vma, addr);
602 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
603 			return NULL;
604 		if (is_zero_pfn(pfn))
605 			return NULL;
606 		if (pte_devmap(pte))
607 		/*
608 		 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
609 		 * and will have refcounts incremented on their struct pages
610 		 * when they are inserted into PTEs, thus they are safe to
611 		 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
612 		 * do not have refcounts. Example of legacy ZONE_DEVICE is
613 		 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
614 		 */
615 			return NULL;
616 
617 		print_bad_pte(vma, addr, pte, NULL);
618 		return NULL;
619 	}
620 
621 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
622 
623 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
624 		if (vma->vm_flags & VM_MIXEDMAP) {
625 			if (!pfn_valid(pfn))
626 				return NULL;
627 			if (is_zero_pfn(pfn))
628 				return NULL;
629 			goto out;
630 		} else {
631 			unsigned long off;
632 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
633 			if (pfn == vma->vm_pgoff + off)
634 				return NULL;
635 			if (!is_cow_mapping(vma->vm_flags))
636 				return NULL;
637 		}
638 	}
639 
640 	if (is_zero_pfn(pfn))
641 		return NULL;
642 
643 check_pfn:
644 	if (unlikely(pfn > highest_memmap_pfn)) {
645 		print_bad_pte(vma, addr, pte, NULL);
646 		return NULL;
647 	}
648 
649 	/*
650 	 * NOTE! We still have PageReserved() pages in the page tables.
651 	 * eg. VDSO mappings can cause them to exist.
652 	 */
653 out:
654 	VM_WARN_ON_ONCE(is_zero_pfn(pfn));
655 	return pfn_to_page(pfn);
656 }
657 
658 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
659 			    pte_t pte)
660 {
661 	struct page *page = vm_normal_page(vma, addr, pte);
662 
663 	if (page)
664 		return page_folio(page);
665 	return NULL;
666 }
667 
668 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
669 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
670 				pmd_t pmd)
671 {
672 	unsigned long pfn = pmd_pfn(pmd);
673 
674 	/* Currently it's only used for huge pfnmaps */
675 	if (unlikely(pmd_special(pmd)))
676 		return NULL;
677 
678 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
679 		if (vma->vm_flags & VM_MIXEDMAP) {
680 			if (!pfn_valid(pfn))
681 				return NULL;
682 			goto out;
683 		} else {
684 			unsigned long off;
685 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
686 			if (pfn == vma->vm_pgoff + off)
687 				return NULL;
688 			if (!is_cow_mapping(vma->vm_flags))
689 				return NULL;
690 		}
691 	}
692 
693 	if (pmd_devmap(pmd))
694 		return NULL;
695 	if (is_huge_zero_pmd(pmd))
696 		return NULL;
697 	if (unlikely(pfn > highest_memmap_pfn))
698 		return NULL;
699 
700 	/*
701 	 * NOTE! We still have PageReserved() pages in the page tables.
702 	 * eg. VDSO mappings can cause them to exist.
703 	 */
704 out:
705 	return pfn_to_page(pfn);
706 }
707 
708 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
709 				  unsigned long addr, pmd_t pmd)
710 {
711 	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
712 
713 	if (page)
714 		return page_folio(page);
715 	return NULL;
716 }
717 #endif
718 
719 static void restore_exclusive_pte(struct vm_area_struct *vma,
720 				  struct page *page, unsigned long address,
721 				  pte_t *ptep)
722 {
723 	struct folio *folio = page_folio(page);
724 	pte_t orig_pte;
725 	pte_t pte;
726 	swp_entry_t entry;
727 
728 	orig_pte = ptep_get(ptep);
729 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
730 	if (pte_swp_soft_dirty(orig_pte))
731 		pte = pte_mksoft_dirty(pte);
732 
733 	entry = pte_to_swp_entry(orig_pte);
734 	if (pte_swp_uffd_wp(orig_pte))
735 		pte = pte_mkuffd_wp(pte);
736 	else if (is_writable_device_exclusive_entry(entry))
737 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
738 
739 	VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
740 					   PageAnonExclusive(page)), folio);
741 
742 	/*
743 	 * No need to take a page reference as one was already
744 	 * created when the swap entry was made.
745 	 */
746 	if (folio_test_anon(folio))
747 		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
748 	else
749 		/*
750 		 * Currently device exclusive access only supports anonymous
751 		 * memory so the entry shouldn't point to a filebacked page.
752 		 */
753 		WARN_ON_ONCE(1);
754 
755 	set_pte_at(vma->vm_mm, address, ptep, pte);
756 
757 	/*
758 	 * No need to invalidate - it was non-present before. However
759 	 * secondary CPUs may have mappings that need invalidating.
760 	 */
761 	update_mmu_cache(vma, address, ptep);
762 }
763 
764 /*
765  * Tries to restore an exclusive pte if the page lock can be acquired without
766  * sleeping.
767  */
768 static int
769 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
770 			unsigned long addr)
771 {
772 	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
773 	struct page *page = pfn_swap_entry_to_page(entry);
774 
775 	if (trylock_page(page)) {
776 		restore_exclusive_pte(vma, page, addr, src_pte);
777 		unlock_page(page);
778 		return 0;
779 	}
780 
781 	return -EBUSY;
782 }
783 
784 /*
785  * copy one vm_area from one task to the other. Assumes the page tables
786  * already present in the new task to be cleared in the whole range
787  * covered by this vma.
788  */
789 
790 static unsigned long
791 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
792 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
793 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
794 {
795 	unsigned long vm_flags = dst_vma->vm_flags;
796 	pte_t orig_pte = ptep_get(src_pte);
797 	pte_t pte = orig_pte;
798 	struct folio *folio;
799 	struct page *page;
800 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
801 
802 	if (likely(!non_swap_entry(entry))) {
803 		if (swap_duplicate(entry) < 0)
804 			return -EIO;
805 
806 		/* make sure dst_mm is on swapoff's mmlist. */
807 		if (unlikely(list_empty(&dst_mm->mmlist))) {
808 			spin_lock(&mmlist_lock);
809 			if (list_empty(&dst_mm->mmlist))
810 				list_add(&dst_mm->mmlist,
811 						&src_mm->mmlist);
812 			spin_unlock(&mmlist_lock);
813 		}
814 		/* Mark the swap entry as shared. */
815 		if (pte_swp_exclusive(orig_pte)) {
816 			pte = pte_swp_clear_exclusive(orig_pte);
817 			set_pte_at(src_mm, addr, src_pte, pte);
818 		}
819 		rss[MM_SWAPENTS]++;
820 	} else if (is_migration_entry(entry)) {
821 		folio = pfn_swap_entry_folio(entry);
822 
823 		rss[mm_counter(folio)]++;
824 
825 		if (!is_readable_migration_entry(entry) &&
826 				is_cow_mapping(vm_flags)) {
827 			/*
828 			 * COW mappings require pages in both parent and child
829 			 * to be set to read. A previously exclusive entry is
830 			 * now shared.
831 			 */
832 			entry = make_readable_migration_entry(
833 							swp_offset(entry));
834 			pte = swp_entry_to_pte(entry);
835 			if (pte_swp_soft_dirty(orig_pte))
836 				pte = pte_swp_mksoft_dirty(pte);
837 			if (pte_swp_uffd_wp(orig_pte))
838 				pte = pte_swp_mkuffd_wp(pte);
839 			set_pte_at(src_mm, addr, src_pte, pte);
840 		}
841 	} else if (is_device_private_entry(entry)) {
842 		page = pfn_swap_entry_to_page(entry);
843 		folio = page_folio(page);
844 
845 		/*
846 		 * Update rss count even for unaddressable pages, as
847 		 * they should treated just like normal pages in this
848 		 * respect.
849 		 *
850 		 * We will likely want to have some new rss counters
851 		 * for unaddressable pages, at some point. But for now
852 		 * keep things as they are.
853 		 */
854 		folio_get(folio);
855 		rss[mm_counter(folio)]++;
856 		/* Cannot fail as these pages cannot get pinned. */
857 		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
858 
859 		/*
860 		 * We do not preserve soft-dirty information, because so
861 		 * far, checkpoint/restore is the only feature that
862 		 * requires that. And checkpoint/restore does not work
863 		 * when a device driver is involved (you cannot easily
864 		 * save and restore device driver state).
865 		 */
866 		if (is_writable_device_private_entry(entry) &&
867 		    is_cow_mapping(vm_flags)) {
868 			entry = make_readable_device_private_entry(
869 							swp_offset(entry));
870 			pte = swp_entry_to_pte(entry);
871 			if (pte_swp_uffd_wp(orig_pte))
872 				pte = pte_swp_mkuffd_wp(pte);
873 			set_pte_at(src_mm, addr, src_pte, pte);
874 		}
875 	} else if (is_device_exclusive_entry(entry)) {
876 		/*
877 		 * Make device exclusive entries present by restoring the
878 		 * original entry then copying as for a present pte. Device
879 		 * exclusive entries currently only support private writable
880 		 * (ie. COW) mappings.
881 		 */
882 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
883 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
884 			return -EBUSY;
885 		return -ENOENT;
886 	} else if (is_pte_marker_entry(entry)) {
887 		pte_marker marker = copy_pte_marker(entry, dst_vma);
888 
889 		if (marker)
890 			set_pte_at(dst_mm, addr, dst_pte,
891 				   make_pte_marker(marker));
892 		return 0;
893 	}
894 	if (!userfaultfd_wp(dst_vma))
895 		pte = pte_swp_clear_uffd_wp(pte);
896 	set_pte_at(dst_mm, addr, dst_pte, pte);
897 	return 0;
898 }
899 
900 /*
901  * Copy a present and normal page.
902  *
903  * NOTE! The usual case is that this isn't required;
904  * instead, the caller can just increase the page refcount
905  * and re-use the pte the traditional way.
906  *
907  * And if we need a pre-allocated page but don't yet have
908  * one, return a negative error to let the preallocation
909  * code know so that it can do so outside the page table
910  * lock.
911  */
912 static inline int
913 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
914 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
915 		  struct folio **prealloc, struct page *page)
916 {
917 	struct folio *new_folio;
918 	pte_t pte;
919 
920 	new_folio = *prealloc;
921 	if (!new_folio)
922 		return -EAGAIN;
923 
924 	/*
925 	 * We have a prealloc page, all good!  Take it
926 	 * over and copy the page & arm it.
927 	 */
928 
929 	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
930 		return -EHWPOISON;
931 
932 	*prealloc = NULL;
933 	__folio_mark_uptodate(new_folio);
934 	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
935 	folio_add_lru_vma(new_folio, dst_vma);
936 	rss[MM_ANONPAGES]++;
937 
938 	/* All done, just insert the new page copy in the child */
939 	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
940 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
941 	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
942 		/* Uffd-wp needs to be delivered to dest pte as well */
943 		pte = pte_mkuffd_wp(pte);
944 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
945 	return 0;
946 }
947 
948 static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
949 		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
950 		pte_t pte, unsigned long addr, int nr)
951 {
952 	struct mm_struct *src_mm = src_vma->vm_mm;
953 
954 	/* If it's a COW mapping, write protect it both processes. */
955 	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
956 		wrprotect_ptes(src_mm, addr, src_pte, nr);
957 		pte = pte_wrprotect(pte);
958 	}
959 
960 	/* If it's a shared mapping, mark it clean in the child. */
961 	if (src_vma->vm_flags & VM_SHARED)
962 		pte = pte_mkclean(pte);
963 	pte = pte_mkold(pte);
964 
965 	if (!userfaultfd_wp(dst_vma))
966 		pte = pte_clear_uffd_wp(pte);
967 
968 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
969 }
970 
971 /*
972  * Copy one present PTE, trying to batch-process subsequent PTEs that map
973  * consecutive pages of the same folio by copying them as well.
974  *
975  * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
976  * Otherwise, returns the number of copied PTEs (at least 1).
977  */
978 static inline int
979 copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
980 		 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
981 		 int max_nr, int *rss, struct folio **prealloc)
982 {
983 	struct page *page;
984 	struct folio *folio;
985 	bool any_writable;
986 	fpb_t flags = 0;
987 	int err, nr;
988 
989 	page = vm_normal_page(src_vma, addr, pte);
990 	if (unlikely(!page))
991 		goto copy_pte;
992 
993 	folio = page_folio(page);
994 
995 	/*
996 	 * If we likely have to copy, just don't bother with batching. Make
997 	 * sure that the common "small folio" case is as fast as possible
998 	 * by keeping the batching logic separate.
999 	 */
1000 	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
1001 		if (src_vma->vm_flags & VM_SHARED)
1002 			flags |= FPB_IGNORE_DIRTY;
1003 		if (!vma_soft_dirty_enabled(src_vma))
1004 			flags |= FPB_IGNORE_SOFT_DIRTY;
1005 
1006 		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
1007 				     &any_writable, NULL, NULL);
1008 		folio_ref_add(folio, nr);
1009 		if (folio_test_anon(folio)) {
1010 			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
1011 								  nr, src_vma))) {
1012 				folio_ref_sub(folio, nr);
1013 				return -EAGAIN;
1014 			}
1015 			rss[MM_ANONPAGES] += nr;
1016 			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1017 		} else {
1018 			folio_dup_file_rmap_ptes(folio, page, nr);
1019 			rss[mm_counter_file(folio)] += nr;
1020 		}
1021 		if (any_writable)
1022 			pte = pte_mkwrite(pte, src_vma);
1023 		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
1024 				    addr, nr);
1025 		return nr;
1026 	}
1027 
1028 	folio_get(folio);
1029 	if (folio_test_anon(folio)) {
1030 		/*
1031 		 * If this page may have been pinned by the parent process,
1032 		 * copy the page immediately for the child so that we'll always
1033 		 * guarantee the pinned page won't be randomly replaced in the
1034 		 * future.
1035 		 */
1036 		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
1037 			/* Page may be pinned, we have to copy. */
1038 			folio_put(folio);
1039 			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
1040 						addr, rss, prealloc, page);
1041 			return err ? err : 1;
1042 		}
1043 		rss[MM_ANONPAGES]++;
1044 		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1045 	} else {
1046 		folio_dup_file_rmap_pte(folio, page);
1047 		rss[mm_counter_file(folio)]++;
1048 	}
1049 
1050 copy_pte:
1051 	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
1052 	return 1;
1053 }
1054 
1055 static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
1056 		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
1057 {
1058 	struct folio *new_folio;
1059 
1060 	if (need_zero)
1061 		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
1062 	else
1063 		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
1064 
1065 	if (!new_folio)
1066 		return NULL;
1067 
1068 	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
1069 		folio_put(new_folio);
1070 		return NULL;
1071 	}
1072 	folio_throttle_swaprate(new_folio, GFP_KERNEL);
1073 
1074 	return new_folio;
1075 }
1076 
1077 static int
1078 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1079 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1080 	       unsigned long end)
1081 {
1082 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1083 	struct mm_struct *src_mm = src_vma->vm_mm;
1084 	pte_t *orig_src_pte, *orig_dst_pte;
1085 	pte_t *src_pte, *dst_pte;
1086 	pmd_t dummy_pmdval;
1087 	pte_t ptent;
1088 	spinlock_t *src_ptl, *dst_ptl;
1089 	int progress, max_nr, ret = 0;
1090 	int rss[NR_MM_COUNTERS];
1091 	swp_entry_t entry = (swp_entry_t){0};
1092 	struct folio *prealloc = NULL;
1093 	int nr;
1094 
1095 again:
1096 	progress = 0;
1097 	init_rss_vec(rss);
1098 
1099 	/*
1100 	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
1101 	 * error handling here, assume that exclusive mmap_lock on dst and src
1102 	 * protects anon from unexpected THP transitions; with shmem and file
1103 	 * protected by mmap_lock-less collapse skipping areas with anon_vma
1104 	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
1105 	 * can remove such assumptions later, but this is good enough for now.
1106 	 */
1107 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1108 	if (!dst_pte) {
1109 		ret = -ENOMEM;
1110 		goto out;
1111 	}
1112 
1113 	/*
1114 	 * We already hold the exclusive mmap_lock, the copy_pte_range() and
1115 	 * retract_page_tables() are using vma->anon_vma to be exclusive, so
1116 	 * the PTE page is stable, and there is no need to get pmdval and do
1117 	 * pmd_same() check.
1118 	 */
1119 	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
1120 					   &src_ptl);
1121 	if (!src_pte) {
1122 		pte_unmap_unlock(dst_pte, dst_ptl);
1123 		/* ret == 0 */
1124 		goto out;
1125 	}
1126 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1127 	orig_src_pte = src_pte;
1128 	orig_dst_pte = dst_pte;
1129 	arch_enter_lazy_mmu_mode();
1130 
1131 	do {
1132 		nr = 1;
1133 
1134 		/*
1135 		 * We are holding two locks at this point - either of them
1136 		 * could generate latencies in another task on another CPU.
1137 		 */
1138 		if (progress >= 32) {
1139 			progress = 0;
1140 			if (need_resched() ||
1141 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1142 				break;
1143 		}
1144 		ptent = ptep_get(src_pte);
1145 		if (pte_none(ptent)) {
1146 			progress++;
1147 			continue;
1148 		}
1149 		if (unlikely(!pte_present(ptent))) {
1150 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1151 						  dst_pte, src_pte,
1152 						  dst_vma, src_vma,
1153 						  addr, rss);
1154 			if (ret == -EIO) {
1155 				entry = pte_to_swp_entry(ptep_get(src_pte));
1156 				break;
1157 			} else if (ret == -EBUSY) {
1158 				break;
1159 			} else if (!ret) {
1160 				progress += 8;
1161 				continue;
1162 			}
1163 			ptent = ptep_get(src_pte);
1164 			VM_WARN_ON_ONCE(!pte_present(ptent));
1165 
1166 			/*
1167 			 * Device exclusive entry restored, continue by copying
1168 			 * the now present pte.
1169 			 */
1170 			WARN_ON_ONCE(ret != -ENOENT);
1171 		}
1172 		/* copy_present_ptes() will clear `*prealloc' if consumed */
1173 		max_nr = (end - addr) / PAGE_SIZE;
1174 		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
1175 					ptent, addr, max_nr, rss, &prealloc);
1176 		/*
1177 		 * If we need a pre-allocated page for this pte, drop the
1178 		 * locks, allocate, and try again.
1179 		 * If copy failed due to hwpoison in source page, break out.
1180 		 */
1181 		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
1182 			break;
1183 		if (unlikely(prealloc)) {
1184 			/*
1185 			 * pre-alloc page cannot be reused by next time so as
1186 			 * to strictly follow mempolicy (e.g., alloc_page_vma()
1187 			 * will allocate page according to address).  This
1188 			 * could only happen if one pinned pte changed.
1189 			 */
1190 			folio_put(prealloc);
1191 			prealloc = NULL;
1192 		}
1193 		nr = ret;
1194 		progress += 8 * nr;
1195 	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
1196 		 addr != end);
1197 
1198 	arch_leave_lazy_mmu_mode();
1199 	pte_unmap_unlock(orig_src_pte, src_ptl);
1200 	add_mm_rss_vec(dst_mm, rss);
1201 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1202 	cond_resched();
1203 
1204 	if (ret == -EIO) {
1205 		VM_WARN_ON_ONCE(!entry.val);
1206 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1207 			ret = -ENOMEM;
1208 			goto out;
1209 		}
1210 		entry.val = 0;
1211 	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
1212 		goto out;
1213 	} else if (ret ==  -EAGAIN) {
1214 		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
1215 		if (!prealloc)
1216 			return -ENOMEM;
1217 	} else if (ret < 0) {
1218 		VM_WARN_ON_ONCE(1);
1219 	}
1220 
1221 	/* We've captured and resolved the error. Reset, try again. */
1222 	ret = 0;
1223 
1224 	if (addr != end)
1225 		goto again;
1226 out:
1227 	if (unlikely(prealloc))
1228 		folio_put(prealloc);
1229 	return ret;
1230 }
1231 
1232 static inline int
1233 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1234 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1235 	       unsigned long end)
1236 {
1237 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1238 	struct mm_struct *src_mm = src_vma->vm_mm;
1239 	pmd_t *src_pmd, *dst_pmd;
1240 	unsigned long next;
1241 
1242 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1243 	if (!dst_pmd)
1244 		return -ENOMEM;
1245 	src_pmd = pmd_offset(src_pud, addr);
1246 	do {
1247 		next = pmd_addr_end(addr, end);
1248 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1249 			|| pmd_devmap(*src_pmd)) {
1250 			int err;
1251 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1252 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1253 					    addr, dst_vma, src_vma);
1254 			if (err == -ENOMEM)
1255 				return -ENOMEM;
1256 			if (!err)
1257 				continue;
1258 			/* fall through */
1259 		}
1260 		if (pmd_none_or_clear_bad(src_pmd))
1261 			continue;
1262 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1263 				   addr, next))
1264 			return -ENOMEM;
1265 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1266 	return 0;
1267 }
1268 
1269 static inline int
1270 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1271 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1272 	       unsigned long end)
1273 {
1274 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1275 	struct mm_struct *src_mm = src_vma->vm_mm;
1276 	pud_t *src_pud, *dst_pud;
1277 	unsigned long next;
1278 
1279 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1280 	if (!dst_pud)
1281 		return -ENOMEM;
1282 	src_pud = pud_offset(src_p4d, addr);
1283 	do {
1284 		next = pud_addr_end(addr, end);
1285 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1286 			int err;
1287 
1288 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1289 			err = copy_huge_pud(dst_mm, src_mm,
1290 					    dst_pud, src_pud, addr, src_vma);
1291 			if (err == -ENOMEM)
1292 				return -ENOMEM;
1293 			if (!err)
1294 				continue;
1295 			/* fall through */
1296 		}
1297 		if (pud_none_or_clear_bad(src_pud))
1298 			continue;
1299 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1300 				   addr, next))
1301 			return -ENOMEM;
1302 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1303 	return 0;
1304 }
1305 
1306 static inline int
1307 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1308 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1309 	       unsigned long end)
1310 {
1311 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1312 	p4d_t *src_p4d, *dst_p4d;
1313 	unsigned long next;
1314 
1315 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1316 	if (!dst_p4d)
1317 		return -ENOMEM;
1318 	src_p4d = p4d_offset(src_pgd, addr);
1319 	do {
1320 		next = p4d_addr_end(addr, end);
1321 		if (p4d_none_or_clear_bad(src_p4d))
1322 			continue;
1323 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1324 				   addr, next))
1325 			return -ENOMEM;
1326 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1327 	return 0;
1328 }
1329 
1330 /*
1331  * Return true if the vma needs to copy the pgtable during this fork().  Return
1332  * false when we can speed up fork() by allowing lazy page faults later until
1333  * when the child accesses the memory range.
1334  */
1335 static bool
1336 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1337 {
1338 	/*
1339 	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
1340 	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
1341 	 * contains uffd-wp protection information, that's something we can't
1342 	 * retrieve from page cache, and skip copying will lose those info.
1343 	 */
1344 	if (userfaultfd_wp(dst_vma))
1345 		return true;
1346 
1347 	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
1348 		return true;
1349 
1350 	if (src_vma->anon_vma)
1351 		return true;
1352 
1353 	/*
1354 	 * Don't copy ptes where a page fault will fill them correctly.  Fork
1355 	 * becomes much lighter when there are big shared or private readonly
1356 	 * mappings. The tradeoff is that copy_page_range is more efficient
1357 	 * than faulting.
1358 	 */
1359 	return false;
1360 }
1361 
1362 int
1363 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1364 {
1365 	pgd_t *src_pgd, *dst_pgd;
1366 	unsigned long next;
1367 	unsigned long addr = src_vma->vm_start;
1368 	unsigned long end = src_vma->vm_end;
1369 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1370 	struct mm_struct *src_mm = src_vma->vm_mm;
1371 	struct mmu_notifier_range range;
1372 	bool is_cow;
1373 	int ret;
1374 
1375 	if (!vma_needs_copy(dst_vma, src_vma))
1376 		return 0;
1377 
1378 	if (is_vm_hugetlb_page(src_vma))
1379 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1380 
1381 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1382 		/*
1383 		 * We do not free on error cases below as remove_vma
1384 		 * gets called on error from higher level routine
1385 		 */
1386 		ret = track_pfn_copy(src_vma);
1387 		if (ret)
1388 			return ret;
1389 	}
1390 
1391 	/*
1392 	 * We need to invalidate the secondary MMU mappings only when
1393 	 * there could be a permission downgrade on the ptes of the
1394 	 * parent mm. And a permission downgrade will only happen if
1395 	 * is_cow_mapping() returns true.
1396 	 */
1397 	is_cow = is_cow_mapping(src_vma->vm_flags);
1398 
1399 	if (is_cow) {
1400 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1401 					0, src_mm, addr, end);
1402 		mmu_notifier_invalidate_range_start(&range);
1403 		/*
1404 		 * Disabling preemption is not needed for the write side, as
1405 		 * the read side doesn't spin, but goes to the mmap_lock.
1406 		 *
1407 		 * Use the raw variant of the seqcount_t write API to avoid
1408 		 * lockdep complaining about preemptibility.
1409 		 */
1410 		vma_assert_write_locked(src_vma);
1411 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1412 	}
1413 
1414 	ret = 0;
1415 	dst_pgd = pgd_offset(dst_mm, addr);
1416 	src_pgd = pgd_offset(src_mm, addr);
1417 	do {
1418 		next = pgd_addr_end(addr, end);
1419 		if (pgd_none_or_clear_bad(src_pgd))
1420 			continue;
1421 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1422 					    addr, next))) {
1423 			untrack_pfn_clear(dst_vma);
1424 			ret = -ENOMEM;
1425 			break;
1426 		}
1427 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1428 
1429 	if (is_cow) {
1430 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1431 		mmu_notifier_invalidate_range_end(&range);
1432 	}
1433 	return ret;
1434 }
1435 
1436 /* Whether we should zap all COWed (private) pages too */
1437 static inline bool should_zap_cows(struct zap_details *details)
1438 {
1439 	/* By default, zap all pages */
1440 	if (!details || details->reclaim_pt)
1441 		return true;
1442 
1443 	/* Or, we zap COWed pages only if the caller wants to */
1444 	return details->even_cows;
1445 }
1446 
1447 /* Decides whether we should zap this folio with the folio pointer specified */
1448 static inline bool should_zap_folio(struct zap_details *details,
1449 				    struct folio *folio)
1450 {
1451 	/* If we can make a decision without *folio.. */
1452 	if (should_zap_cows(details))
1453 		return true;
1454 
1455 	/* Otherwise we should only zap non-anon folios */
1456 	return !folio_test_anon(folio);
1457 }
1458 
1459 static inline bool zap_drop_markers(struct zap_details *details)
1460 {
1461 	if (!details)
1462 		return false;
1463 
1464 	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1465 }
1466 
1467 /*
1468  * This function makes sure that we'll replace the none pte with an uffd-wp
1469  * swap special pte marker when necessary. Must be with the pgtable lock held.
1470  *
1471  * Returns true if uffd-wp ptes was installed, false otherwise.
1472  */
1473 static inline bool
1474 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1475 			      unsigned long addr, pte_t *pte, int nr,
1476 			      struct zap_details *details, pte_t pteval)
1477 {
1478 	bool was_installed = false;
1479 
1480 #ifdef CONFIG_PTE_MARKER_UFFD_WP
1481 	/* Zap on anonymous always means dropping everything */
1482 	if (vma_is_anonymous(vma))
1483 		return false;
1484 
1485 	if (zap_drop_markers(details))
1486 		return false;
1487 
1488 	for (;;) {
1489 		/* the PFN in the PTE is irrelevant. */
1490 		if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
1491 			was_installed = true;
1492 		if (--nr == 0)
1493 			break;
1494 		pte++;
1495 		addr += PAGE_SIZE;
1496 	}
1497 #endif
1498 	return was_installed;
1499 }
1500 
1501 static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
1502 		struct vm_area_struct *vma, struct folio *folio,
1503 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
1504 		unsigned long addr, struct zap_details *details, int *rss,
1505 		bool *force_flush, bool *force_break, bool *any_skipped)
1506 {
1507 	struct mm_struct *mm = tlb->mm;
1508 	bool delay_rmap = false;
1509 
1510 	if (!folio_test_anon(folio)) {
1511 		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1512 		if (pte_dirty(ptent)) {
1513 			folio_mark_dirty(folio);
1514 			if (tlb_delay_rmap(tlb)) {
1515 				delay_rmap = true;
1516 				*force_flush = true;
1517 			}
1518 		}
1519 		if (pte_young(ptent) && likely(vma_has_recency(vma)))
1520 			folio_mark_accessed(folio);
1521 		rss[mm_counter(folio)] -= nr;
1522 	} else {
1523 		/* We don't need up-to-date accessed/dirty bits. */
1524 		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1525 		rss[MM_ANONPAGES] -= nr;
1526 	}
1527 	/* Checking a single PTE in a batch is sufficient. */
1528 	arch_check_zapped_pte(vma, ptent);
1529 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
1530 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
1531 		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
1532 							     nr, details, ptent);
1533 
1534 	if (!delay_rmap) {
1535 		folio_remove_rmap_ptes(folio, page, nr, vma);
1536 
1537 		if (unlikely(folio_mapcount(folio) < 0))
1538 			print_bad_pte(vma, addr, ptent, page);
1539 	}
1540 	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
1541 		*force_flush = true;
1542 		*force_break = true;
1543 	}
1544 }
1545 
1546 /*
1547  * Zap or skip at least one present PTE, trying to batch-process subsequent
1548  * PTEs that map consecutive pages of the same folio.
1549  *
1550  * Returns the number of processed (skipped or zapped) PTEs (at least 1).
1551  */
1552 static inline int zap_present_ptes(struct mmu_gather *tlb,
1553 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1554 		unsigned int max_nr, unsigned long addr,
1555 		struct zap_details *details, int *rss, bool *force_flush,
1556 		bool *force_break, bool *any_skipped)
1557 {
1558 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
1559 	struct mm_struct *mm = tlb->mm;
1560 	struct folio *folio;
1561 	struct page *page;
1562 	int nr;
1563 
1564 	page = vm_normal_page(vma, addr, ptent);
1565 	if (!page) {
1566 		/* We don't need up-to-date accessed/dirty bits. */
1567 		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
1568 		arch_check_zapped_pte(vma, ptent);
1569 		tlb_remove_tlb_entry(tlb, pte, addr);
1570 		if (userfaultfd_pte_wp(vma, ptent))
1571 			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
1572 						pte, 1, details, ptent);
1573 		ksm_might_unmap_zero_page(mm, ptent);
1574 		return 1;
1575 	}
1576 
1577 	folio = page_folio(page);
1578 	if (unlikely(!should_zap_folio(details, folio))) {
1579 		*any_skipped = true;
1580 		return 1;
1581 	}
1582 
1583 	/*
1584 	 * Make sure that the common "small folio" case is as fast as possible
1585 	 * by keeping the batching logic separate.
1586 	 */
1587 	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
1588 		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
1589 				     NULL, NULL, NULL);
1590 
1591 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
1592 				       addr, details, rss, force_flush,
1593 				       force_break, any_skipped);
1594 		return nr;
1595 	}
1596 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
1597 			       details, rss, force_flush, force_break, any_skipped);
1598 	return 1;
1599 }
1600 
1601 static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
1602 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1603 		unsigned int max_nr, unsigned long addr,
1604 		struct zap_details *details, int *rss, bool *any_skipped)
1605 {
1606 	swp_entry_t entry;
1607 	int nr = 1;
1608 
1609 	*any_skipped = true;
1610 	entry = pte_to_swp_entry(ptent);
1611 	if (is_device_private_entry(entry) ||
1612 		is_device_exclusive_entry(entry)) {
1613 		struct page *page = pfn_swap_entry_to_page(entry);
1614 		struct folio *folio = page_folio(page);
1615 
1616 		if (unlikely(!should_zap_folio(details, folio)))
1617 			return 1;
1618 		/*
1619 		 * Both device private/exclusive mappings should only
1620 		 * work with anonymous page so far, so we don't need to
1621 		 * consider uffd-wp bit when zap. For more information,
1622 		 * see zap_install_uffd_wp_if_needed().
1623 		 */
1624 		WARN_ON_ONCE(!vma_is_anonymous(vma));
1625 		rss[mm_counter(folio)]--;
1626 		if (is_device_private_entry(entry))
1627 			folio_remove_rmap_pte(folio, page, vma);
1628 		folio_put(folio);
1629 	} else if (!non_swap_entry(entry)) {
1630 		/* Genuine swap entries, hence a private anon pages */
1631 		if (!should_zap_cows(details))
1632 			return 1;
1633 
1634 		nr = swap_pte_batch(pte, max_nr, ptent);
1635 		rss[MM_SWAPENTS] -= nr;
1636 		free_swap_and_cache_nr(entry, nr);
1637 	} else if (is_migration_entry(entry)) {
1638 		struct folio *folio = pfn_swap_entry_folio(entry);
1639 
1640 		if (!should_zap_folio(details, folio))
1641 			return 1;
1642 		rss[mm_counter(folio)]--;
1643 	} else if (pte_marker_entry_uffd_wp(entry)) {
1644 		/*
1645 		 * For anon: always drop the marker; for file: only
1646 		 * drop the marker if explicitly requested.
1647 		 */
1648 		if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
1649 			return 1;
1650 	} else if (is_guard_swp_entry(entry)) {
1651 		/*
1652 		 * Ordinary zapping should not remove guard PTE
1653 		 * markers. Only do so if we should remove PTE markers
1654 		 * in general.
1655 		 */
1656 		if (!zap_drop_markers(details))
1657 			return 1;
1658 	} else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
1659 		if (!should_zap_cows(details))
1660 			return 1;
1661 	} else {
1662 		/* We should have covered all the swap entry types */
1663 		pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
1664 		WARN_ON_ONCE(1);
1665 	}
1666 	clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
1667 	*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
1668 
1669 	return nr;
1670 }
1671 
1672 static inline int do_zap_pte_range(struct mmu_gather *tlb,
1673 				   struct vm_area_struct *vma, pte_t *pte,
1674 				   unsigned long addr, unsigned long end,
1675 				   struct zap_details *details, int *rss,
1676 				   bool *force_flush, bool *force_break,
1677 				   bool *any_skipped)
1678 {
1679 	pte_t ptent = ptep_get(pte);
1680 	int max_nr = (end - addr) / PAGE_SIZE;
1681 	int nr = 0;
1682 
1683 	/* Skip all consecutive none ptes */
1684 	if (pte_none(ptent)) {
1685 		for (nr = 1; nr < max_nr; nr++) {
1686 			ptent = ptep_get(pte + nr);
1687 			if (!pte_none(ptent))
1688 				break;
1689 		}
1690 		max_nr -= nr;
1691 		if (!max_nr)
1692 			return nr;
1693 		pte += nr;
1694 		addr += nr * PAGE_SIZE;
1695 	}
1696 
1697 	if (pte_present(ptent))
1698 		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
1699 				       details, rss, force_flush, force_break,
1700 				       any_skipped);
1701 	else
1702 		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
1703 					  details, rss, any_skipped);
1704 
1705 	return nr;
1706 }
1707 
1708 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1709 				struct vm_area_struct *vma, pmd_t *pmd,
1710 				unsigned long addr, unsigned long end,
1711 				struct zap_details *details)
1712 {
1713 	bool force_flush = false, force_break = false;
1714 	struct mm_struct *mm = tlb->mm;
1715 	int rss[NR_MM_COUNTERS];
1716 	spinlock_t *ptl;
1717 	pte_t *start_pte;
1718 	pte_t *pte;
1719 	pmd_t pmdval;
1720 	unsigned long start = addr;
1721 	bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
1722 	bool direct_reclaim = true;
1723 	int nr;
1724 
1725 retry:
1726 	tlb_change_page_size(tlb, PAGE_SIZE);
1727 	init_rss_vec(rss);
1728 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1729 	if (!pte)
1730 		return addr;
1731 
1732 	flush_tlb_batched_pending(mm);
1733 	arch_enter_lazy_mmu_mode();
1734 	do {
1735 		bool any_skipped = false;
1736 
1737 		if (need_resched()) {
1738 			direct_reclaim = false;
1739 			break;
1740 		}
1741 
1742 		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
1743 				      &force_flush, &force_break, &any_skipped);
1744 		if (any_skipped)
1745 			can_reclaim_pt = false;
1746 		if (unlikely(force_break)) {
1747 			addr += nr * PAGE_SIZE;
1748 			direct_reclaim = false;
1749 			break;
1750 		}
1751 	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
1752 
1753 	/*
1754 	 * Fast path: try to hold the pmd lock and unmap the PTE page.
1755 	 *
1756 	 * If the pte lock was released midway (retry case), or if the attempt
1757 	 * to hold the pmd lock failed, then we need to recheck all pte entries
1758 	 * to ensure they are still none, thereby preventing the pte entries
1759 	 * from being repopulated by another thread.
1760 	 */
1761 	if (can_reclaim_pt && direct_reclaim && addr == end)
1762 		direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
1763 
1764 	add_mm_rss_vec(mm, rss);
1765 	arch_leave_lazy_mmu_mode();
1766 
1767 	/* Do the actual TLB flush before dropping ptl */
1768 	if (force_flush) {
1769 		tlb_flush_mmu_tlbonly(tlb);
1770 		tlb_flush_rmaps(tlb, vma);
1771 	}
1772 	pte_unmap_unlock(start_pte, ptl);
1773 
1774 	/*
1775 	 * If we forced a TLB flush (either due to running out of
1776 	 * batch buffers or because we needed to flush dirty TLB
1777 	 * entries before releasing the ptl), free the batched
1778 	 * memory too. Come back again if we didn't do everything.
1779 	 */
1780 	if (force_flush)
1781 		tlb_flush_mmu(tlb);
1782 
1783 	if (addr != end) {
1784 		cond_resched();
1785 		force_flush = false;
1786 		force_break = false;
1787 		goto retry;
1788 	}
1789 
1790 	if (can_reclaim_pt) {
1791 		if (direct_reclaim)
1792 			free_pte(mm, start, tlb, pmdval);
1793 		else
1794 			try_to_free_pte(mm, pmd, start, tlb);
1795 	}
1796 
1797 	return addr;
1798 }
1799 
1800 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1801 				struct vm_area_struct *vma, pud_t *pud,
1802 				unsigned long addr, unsigned long end,
1803 				struct zap_details *details)
1804 {
1805 	pmd_t *pmd;
1806 	unsigned long next;
1807 
1808 	pmd = pmd_offset(pud, addr);
1809 	do {
1810 		next = pmd_addr_end(addr, end);
1811 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1812 			if (next - addr != HPAGE_PMD_SIZE)
1813 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1814 			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1815 				addr = next;
1816 				continue;
1817 			}
1818 			/* fall through */
1819 		} else if (details && details->single_folio &&
1820 			   folio_test_pmd_mappable(details->single_folio) &&
1821 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1822 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1823 			/*
1824 			 * Take and drop THP pmd lock so that we cannot return
1825 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1826 			 * but not yet decremented compound_mapcount().
1827 			 */
1828 			spin_unlock(ptl);
1829 		}
1830 		if (pmd_none(*pmd)) {
1831 			addr = next;
1832 			continue;
1833 		}
1834 		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1835 		if (addr != next)
1836 			pmd--;
1837 	} while (pmd++, cond_resched(), addr != end);
1838 
1839 	return addr;
1840 }
1841 
1842 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1843 				struct vm_area_struct *vma, p4d_t *p4d,
1844 				unsigned long addr, unsigned long end,
1845 				struct zap_details *details)
1846 {
1847 	pud_t *pud;
1848 	unsigned long next;
1849 
1850 	pud = pud_offset(p4d, addr);
1851 	do {
1852 		next = pud_addr_end(addr, end);
1853 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1854 			if (next - addr != HPAGE_PUD_SIZE) {
1855 				mmap_assert_locked(tlb->mm);
1856 				split_huge_pud(vma, pud, addr);
1857 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1858 				goto next;
1859 			/* fall through */
1860 		}
1861 		if (pud_none_or_clear_bad(pud))
1862 			continue;
1863 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1864 next:
1865 		cond_resched();
1866 	} while (pud++, addr = next, addr != end);
1867 
1868 	return addr;
1869 }
1870 
1871 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1872 				struct vm_area_struct *vma, pgd_t *pgd,
1873 				unsigned long addr, unsigned long end,
1874 				struct zap_details *details)
1875 {
1876 	p4d_t *p4d;
1877 	unsigned long next;
1878 
1879 	p4d = p4d_offset(pgd, addr);
1880 	do {
1881 		next = p4d_addr_end(addr, end);
1882 		if (p4d_none_or_clear_bad(p4d))
1883 			continue;
1884 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1885 	} while (p4d++, addr = next, addr != end);
1886 
1887 	return addr;
1888 }
1889 
1890 void unmap_page_range(struct mmu_gather *tlb,
1891 			     struct vm_area_struct *vma,
1892 			     unsigned long addr, unsigned long end,
1893 			     struct zap_details *details)
1894 {
1895 	pgd_t *pgd;
1896 	unsigned long next;
1897 
1898 	BUG_ON(addr >= end);
1899 	tlb_start_vma(tlb, vma);
1900 	pgd = pgd_offset(vma->vm_mm, addr);
1901 	do {
1902 		next = pgd_addr_end(addr, end);
1903 		if (pgd_none_or_clear_bad(pgd))
1904 			continue;
1905 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1906 	} while (pgd++, addr = next, addr != end);
1907 	tlb_end_vma(tlb, vma);
1908 }
1909 
1910 
1911 static void unmap_single_vma(struct mmu_gather *tlb,
1912 		struct vm_area_struct *vma, unsigned long start_addr,
1913 		unsigned long end_addr,
1914 		struct zap_details *details, bool mm_wr_locked)
1915 {
1916 	unsigned long start = max(vma->vm_start, start_addr);
1917 	unsigned long end;
1918 
1919 	if (start >= vma->vm_end)
1920 		return;
1921 	end = min(vma->vm_end, end_addr);
1922 	if (end <= vma->vm_start)
1923 		return;
1924 
1925 	if (vma->vm_file)
1926 		uprobe_munmap(vma, start, end);
1927 
1928 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1929 		untrack_pfn(vma, 0, 0, mm_wr_locked);
1930 
1931 	if (start != end) {
1932 		if (unlikely(is_vm_hugetlb_page(vma))) {
1933 			/*
1934 			 * It is undesirable to test vma->vm_file as it
1935 			 * should be non-null for valid hugetlb area.
1936 			 * However, vm_file will be NULL in the error
1937 			 * cleanup path of mmap_region. When
1938 			 * hugetlbfs ->mmap method fails,
1939 			 * mmap_region() nullifies vma->vm_file
1940 			 * before calling this function to clean up.
1941 			 * Since no pte has actually been setup, it is
1942 			 * safe to do nothing in this case.
1943 			 */
1944 			if (vma->vm_file) {
1945 				zap_flags_t zap_flags = details ?
1946 				    details->zap_flags : 0;
1947 				__unmap_hugepage_range(tlb, vma, start, end,
1948 							     NULL, zap_flags);
1949 			}
1950 		} else
1951 			unmap_page_range(tlb, vma, start, end, details);
1952 	}
1953 }
1954 
1955 /**
1956  * unmap_vmas - unmap a range of memory covered by a list of vma's
1957  * @tlb: address of the caller's struct mmu_gather
1958  * @mas: the maple state
1959  * @vma: the starting vma
1960  * @start_addr: virtual address at which to start unmapping
1961  * @end_addr: virtual address at which to end unmapping
1962  * @tree_end: The maximum index to check
1963  * @mm_wr_locked: lock flag
1964  *
1965  * Unmap all pages in the vma list.
1966  *
1967  * Only addresses between `start' and `end' will be unmapped.
1968  *
1969  * The VMA list must be sorted in ascending virtual address order.
1970  *
1971  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1972  * range after unmap_vmas() returns.  So the only responsibility here is to
1973  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1974  * drops the lock and schedules.
1975  */
1976 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1977 		struct vm_area_struct *vma, unsigned long start_addr,
1978 		unsigned long end_addr, unsigned long tree_end,
1979 		bool mm_wr_locked)
1980 {
1981 	struct mmu_notifier_range range;
1982 	struct zap_details details = {
1983 		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
1984 		/* Careful - we need to zap private pages too! */
1985 		.even_cows = true,
1986 	};
1987 
1988 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1989 				start_addr, end_addr);
1990 	mmu_notifier_invalidate_range_start(&range);
1991 	do {
1992 		unsigned long start = start_addr;
1993 		unsigned long end = end_addr;
1994 		hugetlb_zap_begin(vma, &start, &end);
1995 		unmap_single_vma(tlb, vma, start, end, &details,
1996 				 mm_wr_locked);
1997 		hugetlb_zap_end(vma, &details);
1998 		vma = mas_find(mas, tree_end - 1);
1999 	} while (vma && likely(!xa_is_zero(vma)));
2000 	mmu_notifier_invalidate_range_end(&range);
2001 }
2002 
2003 /**
2004  * zap_page_range_single - remove user pages in a given range
2005  * @vma: vm_area_struct holding the applicable pages
2006  * @address: starting address of pages to zap
2007  * @size: number of bytes to zap
2008  * @details: details of shared cache invalidation
2009  *
2010  * The range must fit into one VMA.
2011  */
2012 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2013 		unsigned long size, struct zap_details *details)
2014 {
2015 	const unsigned long end = address + size;
2016 	struct mmu_notifier_range range;
2017 	struct mmu_gather tlb;
2018 
2019 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2020 				address, end);
2021 	hugetlb_zap_begin(vma, &range.start, &range.end);
2022 	tlb_gather_mmu(&tlb, vma->vm_mm);
2023 	update_hiwater_rss(vma->vm_mm);
2024 	mmu_notifier_invalidate_range_start(&range);
2025 	/*
2026 	 * unmap 'address-end' not 'range.start-range.end' as range
2027 	 * could have been expanded for hugetlb pmd sharing.
2028 	 */
2029 	unmap_single_vma(&tlb, vma, address, end, details, false);
2030 	mmu_notifier_invalidate_range_end(&range);
2031 	tlb_finish_mmu(&tlb);
2032 	hugetlb_zap_end(vma, details);
2033 }
2034 
2035 /**
2036  * zap_vma_ptes - remove ptes mapping the vma
2037  * @vma: vm_area_struct holding ptes to be zapped
2038  * @address: starting address of pages to zap
2039  * @size: number of bytes to zap
2040  *
2041  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
2042  *
2043  * The entire address range must be fully contained within the vma.
2044  *
2045  */
2046 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2047 		unsigned long size)
2048 {
2049 	if (!range_in_vma(vma, address, address + size) ||
2050 	    		!(vma->vm_flags & VM_PFNMAP))
2051 		return;
2052 
2053 	zap_page_range_single(vma, address, size, NULL);
2054 }
2055 EXPORT_SYMBOL_GPL(zap_vma_ptes);
2056 
2057 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
2058 {
2059 	pgd_t *pgd;
2060 	p4d_t *p4d;
2061 	pud_t *pud;
2062 	pmd_t *pmd;
2063 
2064 	pgd = pgd_offset(mm, addr);
2065 	p4d = p4d_alloc(mm, pgd, addr);
2066 	if (!p4d)
2067 		return NULL;
2068 	pud = pud_alloc(mm, p4d, addr);
2069 	if (!pud)
2070 		return NULL;
2071 	pmd = pmd_alloc(mm, pud, addr);
2072 	if (!pmd)
2073 		return NULL;
2074 
2075 	VM_BUG_ON(pmd_trans_huge(*pmd));
2076 	return pmd;
2077 }
2078 
2079 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2080 			spinlock_t **ptl)
2081 {
2082 	pmd_t *pmd = walk_to_pmd(mm, addr);
2083 
2084 	if (!pmd)
2085 		return NULL;
2086 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
2087 }
2088 
2089 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
2090 {
2091 	VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
2092 	/*
2093 	 * Whoever wants to forbid the zeropage after some zeropages
2094 	 * might already have been mapped has to scan the page tables and
2095 	 * bail out on any zeropages. Zeropages in COW mappings can
2096 	 * be unshared using FAULT_FLAG_UNSHARE faults.
2097 	 */
2098 	if (mm_forbids_zeropage(vma->vm_mm))
2099 		return false;
2100 	/* zeropages in COW mappings are common and unproblematic. */
2101 	if (is_cow_mapping(vma->vm_flags))
2102 		return true;
2103 	/* Mappings that do not allow for writable PTEs are unproblematic. */
2104 	if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
2105 		return true;
2106 	/*
2107 	 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
2108 	 * find the shared zeropage and longterm-pin it, which would
2109 	 * be problematic as soon as the zeropage gets replaced by a different
2110 	 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
2111 	 * now differ to what GUP looked up. FSDAX is incompatible to
2112 	 * FOLL_LONGTERM and VM_IO is incompatible to GUP completely (see
2113 	 * check_vma_flags).
2114 	 */
2115 	return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
2116 	       (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
2117 }
2118 
2119 static int validate_page_before_insert(struct vm_area_struct *vma,
2120 				       struct page *page)
2121 {
2122 	struct folio *folio = page_folio(page);
2123 
2124 	if (!folio_ref_count(folio))
2125 		return -EINVAL;
2126 	if (unlikely(is_zero_folio(folio))) {
2127 		if (!vm_mixed_zeropage_allowed(vma))
2128 			return -EINVAL;
2129 		return 0;
2130 	}
2131 	if (folio_test_anon(folio) || folio_test_slab(folio) ||
2132 	    page_has_type(page))
2133 		return -EINVAL;
2134 	flush_dcache_folio(folio);
2135 	return 0;
2136 }
2137 
2138 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
2139 			unsigned long addr, struct page *page, pgprot_t prot)
2140 {
2141 	struct folio *folio = page_folio(page);
2142 	pte_t pteval;
2143 
2144 	if (!pte_none(ptep_get(pte)))
2145 		return -EBUSY;
2146 	/* Ok, finally just insert the thing.. */
2147 	pteval = mk_pte(page, prot);
2148 	if (unlikely(is_zero_folio(folio))) {
2149 		pteval = pte_mkspecial(pteval);
2150 	} else {
2151 		folio_get(folio);
2152 		inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
2153 		folio_add_file_rmap_pte(folio, page, vma);
2154 	}
2155 	set_pte_at(vma->vm_mm, addr, pte, pteval);
2156 	return 0;
2157 }
2158 
2159 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2160 			struct page *page, pgprot_t prot)
2161 {
2162 	int retval;
2163 	pte_t *pte;
2164 	spinlock_t *ptl;
2165 
2166 	retval = validate_page_before_insert(vma, page);
2167 	if (retval)
2168 		goto out;
2169 	retval = -ENOMEM;
2170 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
2171 	if (!pte)
2172 		goto out;
2173 	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
2174 	pte_unmap_unlock(pte, ptl);
2175 out:
2176 	return retval;
2177 }
2178 
2179 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
2180 			unsigned long addr, struct page *page, pgprot_t prot)
2181 {
2182 	int err;
2183 
2184 	err = validate_page_before_insert(vma, page);
2185 	if (err)
2186 		return err;
2187 	return insert_page_into_pte_locked(vma, pte, addr, page, prot);
2188 }
2189 
2190 /* insert_pages() amortizes the cost of spinlock operations
2191  * when inserting pages in a loop.
2192  */
2193 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
2194 			struct page **pages, unsigned long *num, pgprot_t prot)
2195 {
2196 	pmd_t *pmd = NULL;
2197 	pte_t *start_pte, *pte;
2198 	spinlock_t *pte_lock;
2199 	struct mm_struct *const mm = vma->vm_mm;
2200 	unsigned long curr_page_idx = 0;
2201 	unsigned long remaining_pages_total = *num;
2202 	unsigned long pages_to_write_in_pmd;
2203 	int ret;
2204 more:
2205 	ret = -EFAULT;
2206 	pmd = walk_to_pmd(mm, addr);
2207 	if (!pmd)
2208 		goto out;
2209 
2210 	pages_to_write_in_pmd = min_t(unsigned long,
2211 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
2212 
2213 	/* Allocate the PTE if necessary; takes PMD lock once only. */
2214 	ret = -ENOMEM;
2215 	if (pte_alloc(mm, pmd))
2216 		goto out;
2217 
2218 	while (pages_to_write_in_pmd) {
2219 		int pte_idx = 0;
2220 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
2221 
2222 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2223 		if (!start_pte) {
2224 			ret = -EFAULT;
2225 			goto out;
2226 		}
2227 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
2228 			int err = insert_page_in_batch_locked(vma, pte,
2229 				addr, pages[curr_page_idx], prot);
2230 			if (unlikely(err)) {
2231 				pte_unmap_unlock(start_pte, pte_lock);
2232 				ret = err;
2233 				remaining_pages_total -= pte_idx;
2234 				goto out;
2235 			}
2236 			addr += PAGE_SIZE;
2237 			++curr_page_idx;
2238 		}
2239 		pte_unmap_unlock(start_pte, pte_lock);
2240 		pages_to_write_in_pmd -= batch_size;
2241 		remaining_pages_total -= batch_size;
2242 	}
2243 	if (remaining_pages_total)
2244 		goto more;
2245 	ret = 0;
2246 out:
2247 	*num = remaining_pages_total;
2248 	return ret;
2249 }
2250 
2251 /**
2252  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2253  * @vma: user vma to map to
2254  * @addr: target start user address of these pages
2255  * @pages: source kernel pages
2256  * @num: in: number of pages to map. out: number of pages that were *not*
2257  * mapped. (0 means all pages were successfully mapped).
2258  *
2259  * Preferred over vm_insert_page() when inserting multiple pages.
2260  *
2261  * In case of error, we may have mapped a subset of the provided
2262  * pages. It is the caller's responsibility to account for this case.
2263  *
2264  * The same restrictions apply as in vm_insert_page().
2265  */
2266 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2267 			struct page **pages, unsigned long *num)
2268 {
2269 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
2270 
2271 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
2272 		return -EFAULT;
2273 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2274 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2275 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2276 		vm_flags_set(vma, VM_MIXEDMAP);
2277 	}
2278 	/* Defer page refcount checking till we're about to map that page. */
2279 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
2280 }
2281 EXPORT_SYMBOL(vm_insert_pages);
2282 
2283 /**
2284  * vm_insert_page - insert single page into user vma
2285  * @vma: user vma to map to
2286  * @addr: target user address of this page
2287  * @page: source kernel page
2288  *
2289  * This allows drivers to insert individual pages they've allocated
2290  * into a user vma. The zeropage is supported in some VMAs,
2291  * see vm_mixed_zeropage_allowed().
2292  *
2293  * The page has to be a nice clean _individual_ kernel allocation.
2294  * If you allocate a compound page, you need to have marked it as
2295  * such (__GFP_COMP), or manually just split the page up yourself
2296  * (see split_page()).
2297  *
2298  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2299  * took an arbitrary page protection parameter. This doesn't allow
2300  * that. Your vma protection will have to be set up correctly, which
2301  * means that if you want a shared writable mapping, you'd better
2302  * ask for a shared writable mapping!
2303  *
2304  * The page does not need to be reserved.
2305  *
2306  * Usually this function is called from f_op->mmap() handler
2307  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2308  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2309  * function from other places, for example from page-fault handler.
2310  *
2311  * Return: %0 on success, negative error code otherwise.
2312  */
2313 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2314 			struct page *page)
2315 {
2316 	if (addr < vma->vm_start || addr >= vma->vm_end)
2317 		return -EFAULT;
2318 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2319 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2320 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2321 		vm_flags_set(vma, VM_MIXEDMAP);
2322 	}
2323 	return insert_page(vma, addr, page, vma->vm_page_prot);
2324 }
2325 EXPORT_SYMBOL(vm_insert_page);
2326 
2327 /*
2328  * __vm_map_pages - maps range of kernel pages into user vma
2329  * @vma: user vma to map to
2330  * @pages: pointer to array of source kernel pages
2331  * @num: number of pages in page array
2332  * @offset: user's requested vm_pgoff
2333  *
2334  * This allows drivers to map range of kernel pages into a user vma.
2335  * The zeropage is supported in some VMAs, see
2336  * vm_mixed_zeropage_allowed().
2337  *
2338  * Return: 0 on success and error code otherwise.
2339  */
2340 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2341 				unsigned long num, unsigned long offset)
2342 {
2343 	unsigned long count = vma_pages(vma);
2344 	unsigned long uaddr = vma->vm_start;
2345 	int ret, i;
2346 
2347 	/* Fail if the user requested offset is beyond the end of the object */
2348 	if (offset >= num)
2349 		return -ENXIO;
2350 
2351 	/* Fail if the user requested size exceeds available object size */
2352 	if (count > num - offset)
2353 		return -ENXIO;
2354 
2355 	for (i = 0; i < count; i++) {
2356 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2357 		if (ret < 0)
2358 			return ret;
2359 		uaddr += PAGE_SIZE;
2360 	}
2361 
2362 	return 0;
2363 }
2364 
2365 /**
2366  * vm_map_pages - maps range of kernel pages starts with non zero offset
2367  * @vma: user vma to map to
2368  * @pages: pointer to array of source kernel pages
2369  * @num: number of pages in page array
2370  *
2371  * Maps an object consisting of @num pages, catering for the user's
2372  * requested vm_pgoff
2373  *
2374  * If we fail to insert any page into the vma, the function will return
2375  * immediately leaving any previously inserted pages present.  Callers
2376  * from the mmap handler may immediately return the error as their caller
2377  * will destroy the vma, removing any successfully inserted pages. Other
2378  * callers should make their own arrangements for calling unmap_region().
2379  *
2380  * Context: Process context. Called by mmap handlers.
2381  * Return: 0 on success and error code otherwise.
2382  */
2383 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2384 				unsigned long num)
2385 {
2386 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2387 }
2388 EXPORT_SYMBOL(vm_map_pages);
2389 
2390 /**
2391  * vm_map_pages_zero - map range of kernel pages starts with zero offset
2392  * @vma: user vma to map to
2393  * @pages: pointer to array of source kernel pages
2394  * @num: number of pages in page array
2395  *
2396  * Similar to vm_map_pages(), except that it explicitly sets the offset
2397  * to 0. This function is intended for the drivers that did not consider
2398  * vm_pgoff.
2399  *
2400  * Context: Process context. Called by mmap handlers.
2401  * Return: 0 on success and error code otherwise.
2402  */
2403 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2404 				unsigned long num)
2405 {
2406 	return __vm_map_pages(vma, pages, num, 0);
2407 }
2408 EXPORT_SYMBOL(vm_map_pages_zero);
2409 
2410 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2411 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2412 {
2413 	struct mm_struct *mm = vma->vm_mm;
2414 	pte_t *pte, entry;
2415 	spinlock_t *ptl;
2416 
2417 	pte = get_locked_pte(mm, addr, &ptl);
2418 	if (!pte)
2419 		return VM_FAULT_OOM;
2420 	entry = ptep_get(pte);
2421 	if (!pte_none(entry)) {
2422 		if (mkwrite) {
2423 			/*
2424 			 * For read faults on private mappings the PFN passed
2425 			 * in may not match the PFN we have mapped if the
2426 			 * mapped PFN is a writeable COW page.  In the mkwrite
2427 			 * case we are creating a writable PTE for a shared
2428 			 * mapping and we expect the PFNs to match. If they
2429 			 * don't match, we are likely racing with block
2430 			 * allocation and mapping invalidation so just skip the
2431 			 * update.
2432 			 */
2433 			if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2434 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2435 				goto out_unlock;
2436 			}
2437 			entry = pte_mkyoung(entry);
2438 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2439 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2440 				update_mmu_cache(vma, addr, pte);
2441 		}
2442 		goto out_unlock;
2443 	}
2444 
2445 	/* Ok, finally just insert the thing.. */
2446 	if (pfn_t_devmap(pfn))
2447 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2448 	else
2449 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2450 
2451 	if (mkwrite) {
2452 		entry = pte_mkyoung(entry);
2453 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2454 	}
2455 
2456 	set_pte_at(mm, addr, pte, entry);
2457 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2458 
2459 out_unlock:
2460 	pte_unmap_unlock(pte, ptl);
2461 	return VM_FAULT_NOPAGE;
2462 }
2463 
2464 /**
2465  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2466  * @vma: user vma to map to
2467  * @addr: target user address of this page
2468  * @pfn: source kernel pfn
2469  * @pgprot: pgprot flags for the inserted page
2470  *
2471  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2472  * to override pgprot on a per-page basis.
2473  *
2474  * This only makes sense for IO mappings, and it makes no sense for
2475  * COW mappings.  In general, using multiple vmas is preferable;
2476  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2477  * impractical.
2478  *
2479  * pgprot typically only differs from @vma->vm_page_prot when drivers set
2480  * caching- and encryption bits different than those of @vma->vm_page_prot,
2481  * because the caching- or encryption mode may not be known at mmap() time.
2482  *
2483  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2484  * to set caching and encryption bits for those vmas (except for COW pages).
2485  * This is ensured by core vm only modifying these page table entries using
2486  * functions that don't touch caching- or encryption bits, using pte_modify()
2487  * if needed. (See for example mprotect()).
2488  *
2489  * Also when new page-table entries are created, this is only done using the
2490  * fault() callback, and never using the value of vma->vm_page_prot,
2491  * except for page-table entries that point to anonymous pages as the result
2492  * of COW.
2493  *
2494  * Context: Process context.  May allocate using %GFP_KERNEL.
2495  * Return: vm_fault_t value.
2496  */
2497 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2498 			unsigned long pfn, pgprot_t pgprot)
2499 {
2500 	/*
2501 	 * Technically, architectures with pte_special can avoid all these
2502 	 * restrictions (same for remap_pfn_range).  However we would like
2503 	 * consistency in testing and feature parity among all, so we should
2504 	 * try to keep these invariants in place for everybody.
2505 	 */
2506 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2507 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2508 						(VM_PFNMAP|VM_MIXEDMAP));
2509 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2510 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2511 
2512 	if (addr < vma->vm_start || addr >= vma->vm_end)
2513 		return VM_FAULT_SIGBUS;
2514 
2515 	if (!pfn_modify_allowed(pfn, pgprot))
2516 		return VM_FAULT_SIGBUS;
2517 
2518 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2519 
2520 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2521 			false);
2522 }
2523 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2524 
2525 /**
2526  * vmf_insert_pfn - insert single pfn into user vma
2527  * @vma: user vma to map to
2528  * @addr: target user address of this page
2529  * @pfn: source kernel pfn
2530  *
2531  * Similar to vm_insert_page, this allows drivers to insert individual pages
2532  * they've allocated into a user vma. Same comments apply.
2533  *
2534  * This function should only be called from a vm_ops->fault handler, and
2535  * in that case the handler should return the result of this function.
2536  *
2537  * vma cannot be a COW mapping.
2538  *
2539  * As this is called only for pages that do not currently exist, we
2540  * do not need to flush old virtual caches or the TLB.
2541  *
2542  * Context: Process context.  May allocate using %GFP_KERNEL.
2543  * Return: vm_fault_t value.
2544  */
2545 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2546 			unsigned long pfn)
2547 {
2548 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2549 }
2550 EXPORT_SYMBOL(vmf_insert_pfn);
2551 
2552 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
2553 {
2554 	if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
2555 	    (mkwrite || !vm_mixed_zeropage_allowed(vma)))
2556 		return false;
2557 	/* these checks mirror the abort conditions in vm_normal_page */
2558 	if (vma->vm_flags & VM_MIXEDMAP)
2559 		return true;
2560 	if (pfn_t_devmap(pfn))
2561 		return true;
2562 	if (pfn_t_special(pfn))
2563 		return true;
2564 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2565 		return true;
2566 	return false;
2567 }
2568 
2569 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2570 		unsigned long addr, pfn_t pfn, bool mkwrite)
2571 {
2572 	pgprot_t pgprot = vma->vm_page_prot;
2573 	int err;
2574 
2575 	if (!vm_mixed_ok(vma, pfn, mkwrite))
2576 		return VM_FAULT_SIGBUS;
2577 
2578 	if (addr < vma->vm_start || addr >= vma->vm_end)
2579 		return VM_FAULT_SIGBUS;
2580 
2581 	track_pfn_insert(vma, &pgprot, pfn);
2582 
2583 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2584 		return VM_FAULT_SIGBUS;
2585 
2586 	/*
2587 	 * If we don't have pte special, then we have to use the pfn_valid()
2588 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2589 	 * refcount the page if pfn_valid is true (hence insert_page rather
2590 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2591 	 * without pte special, it would there be refcounted as a normal page.
2592 	 */
2593 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2594 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2595 		struct page *page;
2596 
2597 		/*
2598 		 * At this point we are committed to insert_page()
2599 		 * regardless of whether the caller specified flags that
2600 		 * result in pfn_t_has_page() == false.
2601 		 */
2602 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2603 		err = insert_page(vma, addr, page, pgprot);
2604 	} else {
2605 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2606 	}
2607 
2608 	if (err == -ENOMEM)
2609 		return VM_FAULT_OOM;
2610 	if (err < 0 && err != -EBUSY)
2611 		return VM_FAULT_SIGBUS;
2612 
2613 	return VM_FAULT_NOPAGE;
2614 }
2615 
2616 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2617 		pfn_t pfn)
2618 {
2619 	return __vm_insert_mixed(vma, addr, pfn, false);
2620 }
2621 EXPORT_SYMBOL(vmf_insert_mixed);
2622 
2623 /*
2624  *  If the insertion of PTE failed because someone else already added a
2625  *  different entry in the mean time, we treat that as success as we assume
2626  *  the same entry was actually inserted.
2627  */
2628 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2629 		unsigned long addr, pfn_t pfn)
2630 {
2631 	return __vm_insert_mixed(vma, addr, pfn, true);
2632 }
2633 
2634 /*
2635  * maps a range of physical memory into the requested pages. the old
2636  * mappings are removed. any references to nonexistent pages results
2637  * in null mappings (currently treated as "copy-on-access")
2638  */
2639 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2640 			unsigned long addr, unsigned long end,
2641 			unsigned long pfn, pgprot_t prot)
2642 {
2643 	pte_t *pte, *mapped_pte;
2644 	spinlock_t *ptl;
2645 	int err = 0;
2646 
2647 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2648 	if (!pte)
2649 		return -ENOMEM;
2650 	arch_enter_lazy_mmu_mode();
2651 	do {
2652 		BUG_ON(!pte_none(ptep_get(pte)));
2653 		if (!pfn_modify_allowed(pfn, prot)) {
2654 			err = -EACCES;
2655 			break;
2656 		}
2657 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2658 		pfn++;
2659 	} while (pte++, addr += PAGE_SIZE, addr != end);
2660 	arch_leave_lazy_mmu_mode();
2661 	pte_unmap_unlock(mapped_pte, ptl);
2662 	return err;
2663 }
2664 
2665 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2666 			unsigned long addr, unsigned long end,
2667 			unsigned long pfn, pgprot_t prot)
2668 {
2669 	pmd_t *pmd;
2670 	unsigned long next;
2671 	int err;
2672 
2673 	pfn -= addr >> PAGE_SHIFT;
2674 	pmd = pmd_alloc(mm, pud, addr);
2675 	if (!pmd)
2676 		return -ENOMEM;
2677 	VM_BUG_ON(pmd_trans_huge(*pmd));
2678 	do {
2679 		next = pmd_addr_end(addr, end);
2680 		err = remap_pte_range(mm, pmd, addr, next,
2681 				pfn + (addr >> PAGE_SHIFT), prot);
2682 		if (err)
2683 			return err;
2684 	} while (pmd++, addr = next, addr != end);
2685 	return 0;
2686 }
2687 
2688 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2689 			unsigned long addr, unsigned long end,
2690 			unsigned long pfn, pgprot_t prot)
2691 {
2692 	pud_t *pud;
2693 	unsigned long next;
2694 	int err;
2695 
2696 	pfn -= addr >> PAGE_SHIFT;
2697 	pud = pud_alloc(mm, p4d, addr);
2698 	if (!pud)
2699 		return -ENOMEM;
2700 	do {
2701 		next = pud_addr_end(addr, end);
2702 		err = remap_pmd_range(mm, pud, addr, next,
2703 				pfn + (addr >> PAGE_SHIFT), prot);
2704 		if (err)
2705 			return err;
2706 	} while (pud++, addr = next, addr != end);
2707 	return 0;
2708 }
2709 
2710 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2711 			unsigned long addr, unsigned long end,
2712 			unsigned long pfn, pgprot_t prot)
2713 {
2714 	p4d_t *p4d;
2715 	unsigned long next;
2716 	int err;
2717 
2718 	pfn -= addr >> PAGE_SHIFT;
2719 	p4d = p4d_alloc(mm, pgd, addr);
2720 	if (!p4d)
2721 		return -ENOMEM;
2722 	do {
2723 		next = p4d_addr_end(addr, end);
2724 		err = remap_pud_range(mm, p4d, addr, next,
2725 				pfn + (addr >> PAGE_SHIFT), prot);
2726 		if (err)
2727 			return err;
2728 	} while (p4d++, addr = next, addr != end);
2729 	return 0;
2730 }
2731 
2732 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
2733 		unsigned long pfn, unsigned long size, pgprot_t prot)
2734 {
2735 	pgd_t *pgd;
2736 	unsigned long next;
2737 	unsigned long end = addr + PAGE_ALIGN(size);
2738 	struct mm_struct *mm = vma->vm_mm;
2739 	int err;
2740 
2741 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2742 		return -EINVAL;
2743 
2744 	/*
2745 	 * Physically remapped pages are special. Tell the
2746 	 * rest of the world about it:
2747 	 *   VM_IO tells people not to look at these pages
2748 	 *	(accesses can have side effects).
2749 	 *   VM_PFNMAP tells the core MM that the base pages are just
2750 	 *	raw PFN mappings, and do not have a "struct page" associated
2751 	 *	with them.
2752 	 *   VM_DONTEXPAND
2753 	 *      Disable vma merging and expanding with mremap().
2754 	 *   VM_DONTDUMP
2755 	 *      Omit vma from core dump, even when VM_IO turned off.
2756 	 *
2757 	 * There's a horrible special case to handle copy-on-write
2758 	 * behaviour that some programs depend on. We mark the "original"
2759 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2760 	 * See vm_normal_page() for details.
2761 	 */
2762 	if (is_cow_mapping(vma->vm_flags)) {
2763 		if (addr != vma->vm_start || end != vma->vm_end)
2764 			return -EINVAL;
2765 		vma->vm_pgoff = pfn;
2766 	}
2767 
2768 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
2769 
2770 	BUG_ON(addr >= end);
2771 	pfn -= addr >> PAGE_SHIFT;
2772 	pgd = pgd_offset(mm, addr);
2773 	flush_cache_range(vma, addr, end);
2774 	do {
2775 		next = pgd_addr_end(addr, end);
2776 		err = remap_p4d_range(mm, pgd, addr, next,
2777 				pfn + (addr >> PAGE_SHIFT), prot);
2778 		if (err)
2779 			return err;
2780 	} while (pgd++, addr = next, addr != end);
2781 
2782 	return 0;
2783 }
2784 
2785 /*
2786  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2787  * must have pre-validated the caching bits of the pgprot_t.
2788  */
2789 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2790 		unsigned long pfn, unsigned long size, pgprot_t prot)
2791 {
2792 	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
2793 
2794 	if (!error)
2795 		return 0;
2796 
2797 	/*
2798 	 * A partial pfn range mapping is dangerous: it does not
2799 	 * maintain page reference counts, and callers may free
2800 	 * pages due to the error. So zap it early.
2801 	 */
2802 	zap_page_range_single(vma, addr, size, NULL);
2803 	return error;
2804 }
2805 
2806 /**
2807  * remap_pfn_range - remap kernel memory to userspace
2808  * @vma: user vma to map to
2809  * @addr: target page aligned user address to start at
2810  * @pfn: page frame number of kernel physical memory address
2811  * @size: size of mapping area
2812  * @prot: page protection flags for this mapping
2813  *
2814  * Note: this is only safe if the mm semaphore is held when called.
2815  *
2816  * Return: %0 on success, negative error code otherwise.
2817  */
2818 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2819 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2820 {
2821 	int err;
2822 
2823 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2824 	if (err)
2825 		return -EINVAL;
2826 
2827 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2828 	if (err)
2829 		untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
2830 	return err;
2831 }
2832 EXPORT_SYMBOL(remap_pfn_range);
2833 
2834 /**
2835  * vm_iomap_memory - remap memory to userspace
2836  * @vma: user vma to map to
2837  * @start: start of the physical memory to be mapped
2838  * @len: size of area
2839  *
2840  * This is a simplified io_remap_pfn_range() for common driver use. The
2841  * driver just needs to give us the physical memory range to be mapped,
2842  * we'll figure out the rest from the vma information.
2843  *
2844  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2845  * whatever write-combining details or similar.
2846  *
2847  * Return: %0 on success, negative error code otherwise.
2848  */
2849 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2850 {
2851 	unsigned long vm_len, pfn, pages;
2852 
2853 	/* Check that the physical memory area passed in looks valid */
2854 	if (start + len < start)
2855 		return -EINVAL;
2856 	/*
2857 	 * You *really* shouldn't map things that aren't page-aligned,
2858 	 * but we've historically allowed it because IO memory might
2859 	 * just have smaller alignment.
2860 	 */
2861 	len += start & ~PAGE_MASK;
2862 	pfn = start >> PAGE_SHIFT;
2863 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2864 	if (pfn + pages < pfn)
2865 		return -EINVAL;
2866 
2867 	/* We start the mapping 'vm_pgoff' pages into the area */
2868 	if (vma->vm_pgoff > pages)
2869 		return -EINVAL;
2870 	pfn += vma->vm_pgoff;
2871 	pages -= vma->vm_pgoff;
2872 
2873 	/* Can we fit all of the mapping? */
2874 	vm_len = vma->vm_end - vma->vm_start;
2875 	if (vm_len >> PAGE_SHIFT > pages)
2876 		return -EINVAL;
2877 
2878 	/* Ok, let it rip */
2879 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2880 }
2881 EXPORT_SYMBOL(vm_iomap_memory);
2882 
2883 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2884 				     unsigned long addr, unsigned long end,
2885 				     pte_fn_t fn, void *data, bool create,
2886 				     pgtbl_mod_mask *mask)
2887 {
2888 	pte_t *pte, *mapped_pte;
2889 	int err = 0;
2890 	spinlock_t *ptl;
2891 
2892 	if (create) {
2893 		mapped_pte = pte = (mm == &init_mm) ?
2894 			pte_alloc_kernel_track(pmd, addr, mask) :
2895 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2896 		if (!pte)
2897 			return -ENOMEM;
2898 	} else {
2899 		mapped_pte = pte = (mm == &init_mm) ?
2900 			pte_offset_kernel(pmd, addr) :
2901 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2902 		if (!pte)
2903 			return -EINVAL;
2904 	}
2905 
2906 	arch_enter_lazy_mmu_mode();
2907 
2908 	if (fn) {
2909 		do {
2910 			if (create || !pte_none(ptep_get(pte))) {
2911 				err = fn(pte++, addr, data);
2912 				if (err)
2913 					break;
2914 			}
2915 		} while (addr += PAGE_SIZE, addr != end);
2916 	}
2917 	*mask |= PGTBL_PTE_MODIFIED;
2918 
2919 	arch_leave_lazy_mmu_mode();
2920 
2921 	if (mm != &init_mm)
2922 		pte_unmap_unlock(mapped_pte, ptl);
2923 	return err;
2924 }
2925 
2926 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2927 				     unsigned long addr, unsigned long end,
2928 				     pte_fn_t fn, void *data, bool create,
2929 				     pgtbl_mod_mask *mask)
2930 {
2931 	pmd_t *pmd;
2932 	unsigned long next;
2933 	int err = 0;
2934 
2935 	BUG_ON(pud_leaf(*pud));
2936 
2937 	if (create) {
2938 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2939 		if (!pmd)
2940 			return -ENOMEM;
2941 	} else {
2942 		pmd = pmd_offset(pud, addr);
2943 	}
2944 	do {
2945 		next = pmd_addr_end(addr, end);
2946 		if (pmd_none(*pmd) && !create)
2947 			continue;
2948 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2949 			return -EINVAL;
2950 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2951 			if (!create)
2952 				continue;
2953 			pmd_clear_bad(pmd);
2954 		}
2955 		err = apply_to_pte_range(mm, pmd, addr, next,
2956 					 fn, data, create, mask);
2957 		if (err)
2958 			break;
2959 	} while (pmd++, addr = next, addr != end);
2960 
2961 	return err;
2962 }
2963 
2964 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2965 				     unsigned long addr, unsigned long end,
2966 				     pte_fn_t fn, void *data, bool create,
2967 				     pgtbl_mod_mask *mask)
2968 {
2969 	pud_t *pud;
2970 	unsigned long next;
2971 	int err = 0;
2972 
2973 	if (create) {
2974 		pud = pud_alloc_track(mm, p4d, addr, mask);
2975 		if (!pud)
2976 			return -ENOMEM;
2977 	} else {
2978 		pud = pud_offset(p4d, addr);
2979 	}
2980 	do {
2981 		next = pud_addr_end(addr, end);
2982 		if (pud_none(*pud) && !create)
2983 			continue;
2984 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2985 			return -EINVAL;
2986 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2987 			if (!create)
2988 				continue;
2989 			pud_clear_bad(pud);
2990 		}
2991 		err = apply_to_pmd_range(mm, pud, addr, next,
2992 					 fn, data, create, mask);
2993 		if (err)
2994 			break;
2995 	} while (pud++, addr = next, addr != end);
2996 
2997 	return err;
2998 }
2999 
3000 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
3001 				     unsigned long addr, unsigned long end,
3002 				     pte_fn_t fn, void *data, bool create,
3003 				     pgtbl_mod_mask *mask)
3004 {
3005 	p4d_t *p4d;
3006 	unsigned long next;
3007 	int err = 0;
3008 
3009 	if (create) {
3010 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
3011 		if (!p4d)
3012 			return -ENOMEM;
3013 	} else {
3014 		p4d = p4d_offset(pgd, addr);
3015 	}
3016 	do {
3017 		next = p4d_addr_end(addr, end);
3018 		if (p4d_none(*p4d) && !create)
3019 			continue;
3020 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
3021 			return -EINVAL;
3022 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
3023 			if (!create)
3024 				continue;
3025 			p4d_clear_bad(p4d);
3026 		}
3027 		err = apply_to_pud_range(mm, p4d, addr, next,
3028 					 fn, data, create, mask);
3029 		if (err)
3030 			break;
3031 	} while (p4d++, addr = next, addr != end);
3032 
3033 	return err;
3034 }
3035 
3036 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3037 				 unsigned long size, pte_fn_t fn,
3038 				 void *data, bool create)
3039 {
3040 	pgd_t *pgd;
3041 	unsigned long start = addr, next;
3042 	unsigned long end = addr + size;
3043 	pgtbl_mod_mask mask = 0;
3044 	int err = 0;
3045 
3046 	if (WARN_ON(addr >= end))
3047 		return -EINVAL;
3048 
3049 	pgd = pgd_offset(mm, addr);
3050 	do {
3051 		next = pgd_addr_end(addr, end);
3052 		if (pgd_none(*pgd) && !create)
3053 			continue;
3054 		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
3055 			return -EINVAL;
3056 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
3057 			if (!create)
3058 				continue;
3059 			pgd_clear_bad(pgd);
3060 		}
3061 		err = apply_to_p4d_range(mm, pgd, addr, next,
3062 					 fn, data, create, &mask);
3063 		if (err)
3064 			break;
3065 	} while (pgd++, addr = next, addr != end);
3066 
3067 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
3068 		arch_sync_kernel_mappings(start, start + size);
3069 
3070 	return err;
3071 }
3072 
3073 /*
3074  * Scan a region of virtual memory, filling in page tables as necessary
3075  * and calling a provided function on each leaf page table.
3076  */
3077 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3078 			unsigned long size, pte_fn_t fn, void *data)
3079 {
3080 	return __apply_to_page_range(mm, addr, size, fn, data, true);
3081 }
3082 EXPORT_SYMBOL_GPL(apply_to_page_range);
3083 
3084 /*
3085  * Scan a region of virtual memory, calling a provided function on
3086  * each leaf page table where it exists.
3087  *
3088  * Unlike apply_to_page_range, this does _not_ fill in page tables
3089  * where they are absent.
3090  */
3091 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
3092 				 unsigned long size, pte_fn_t fn, void *data)
3093 {
3094 	return __apply_to_page_range(mm, addr, size, fn, data, false);
3095 }
3096 
3097 /*
3098  * handle_pte_fault chooses page fault handler according to an entry which was
3099  * read non-atomically.  Before making any commitment, on those architectures
3100  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3101  * parts, do_swap_page must check under lock before unmapping the pte and
3102  * proceeding (but do_wp_page is only called after already making such a check;
3103  * and do_anonymous_page can safely check later on).
3104  */
3105 static inline int pte_unmap_same(struct vm_fault *vmf)
3106 {
3107 	int same = 1;
3108 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
3109 	if (sizeof(pte_t) > sizeof(unsigned long)) {
3110 		spin_lock(vmf->ptl);
3111 		same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
3112 		spin_unlock(vmf->ptl);
3113 	}
3114 #endif
3115 	pte_unmap(vmf->pte);
3116 	vmf->pte = NULL;
3117 	return same;
3118 }
3119 
3120 /*
3121  * Return:
3122  *	0:		copied succeeded
3123  *	-EHWPOISON:	copy failed due to hwpoison in source page
3124  *	-EAGAIN:	copied failed (some other reason)
3125  */
3126 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
3127 				      struct vm_fault *vmf)
3128 {
3129 	int ret;
3130 	void *kaddr;
3131 	void __user *uaddr;
3132 	struct vm_area_struct *vma = vmf->vma;
3133 	struct mm_struct *mm = vma->vm_mm;
3134 	unsigned long addr = vmf->address;
3135 
3136 	if (likely(src)) {
3137 		if (copy_mc_user_highpage(dst, src, addr, vma))
3138 			return -EHWPOISON;
3139 		return 0;
3140 	}
3141 
3142 	/*
3143 	 * If the source page was a PFN mapping, we don't have
3144 	 * a "struct page" for it. We do a best-effort copy by
3145 	 * just copying from the original user address. If that
3146 	 * fails, we just zero-fill it. Live with it.
3147 	 */
3148 	kaddr = kmap_local_page(dst);
3149 	pagefault_disable();
3150 	uaddr = (void __user *)(addr & PAGE_MASK);
3151 
3152 	/*
3153 	 * On architectures with software "accessed" bits, we would
3154 	 * take a double page fault, so mark it accessed here.
3155 	 */
3156 	vmf->pte = NULL;
3157 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
3158 		pte_t entry;
3159 
3160 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3161 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3162 			/*
3163 			 * Other thread has already handled the fault
3164 			 * and update local tlb only
3165 			 */
3166 			if (vmf->pte)
3167 				update_mmu_tlb(vma, addr, vmf->pte);
3168 			ret = -EAGAIN;
3169 			goto pte_unlock;
3170 		}
3171 
3172 		entry = pte_mkyoung(vmf->orig_pte);
3173 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3174 			update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3175 	}
3176 
3177 	/*
3178 	 * This really shouldn't fail, because the page is there
3179 	 * in the page tables. But it might just be unreadable,
3180 	 * in which case we just give up and fill the result with
3181 	 * zeroes.
3182 	 */
3183 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3184 		if (vmf->pte)
3185 			goto warn;
3186 
3187 		/* Re-validate under PTL if the page is still mapped */
3188 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3189 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3190 			/* The PTE changed under us, update local tlb */
3191 			if (vmf->pte)
3192 				update_mmu_tlb(vma, addr, vmf->pte);
3193 			ret = -EAGAIN;
3194 			goto pte_unlock;
3195 		}
3196 
3197 		/*
3198 		 * The same page can be mapped back since last copy attempt.
3199 		 * Try to copy again under PTL.
3200 		 */
3201 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3202 			/*
3203 			 * Give a warn in case there can be some obscure
3204 			 * use-case
3205 			 */
3206 warn:
3207 			WARN_ON_ONCE(1);
3208 			clear_page(kaddr);
3209 		}
3210 	}
3211 
3212 	ret = 0;
3213 
3214 pte_unlock:
3215 	if (vmf->pte)
3216 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3217 	pagefault_enable();
3218 	kunmap_local(kaddr);
3219 	flush_dcache_page(dst);
3220 
3221 	return ret;
3222 }
3223 
3224 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3225 {
3226 	struct file *vm_file = vma->vm_file;
3227 
3228 	if (vm_file)
3229 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3230 
3231 	/*
3232 	 * Special mappings (e.g. VDSO) do not have any file so fake
3233 	 * a default GFP_KERNEL for them.
3234 	 */
3235 	return GFP_KERNEL;
3236 }
3237 
3238 /*
3239  * Notify the address space that the page is about to become writable so that
3240  * it can prohibit this or wait for the page to get into an appropriate state.
3241  *
3242  * We do this without the lock held, so that it can sleep if it needs to.
3243  */
3244 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3245 {
3246 	vm_fault_t ret;
3247 	unsigned int old_flags = vmf->flags;
3248 
3249 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3250 
3251 	if (vmf->vma->vm_file &&
3252 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3253 		return VM_FAULT_SIGBUS;
3254 
3255 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3256 	/* Restore original flags so that caller is not surprised */
3257 	vmf->flags = old_flags;
3258 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3259 		return ret;
3260 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3261 		folio_lock(folio);
3262 		if (!folio->mapping) {
3263 			folio_unlock(folio);
3264 			return 0; /* retry */
3265 		}
3266 		ret |= VM_FAULT_LOCKED;
3267 	} else
3268 		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3269 	return ret;
3270 }
3271 
3272 /*
3273  * Handle dirtying of a page in shared file mapping on a write fault.
3274  *
3275  * The function expects the page to be locked and unlocks it.
3276  */
3277 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3278 {
3279 	struct vm_area_struct *vma = vmf->vma;
3280 	struct address_space *mapping;
3281 	struct folio *folio = page_folio(vmf->page);
3282 	bool dirtied;
3283 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3284 
3285 	dirtied = folio_mark_dirty(folio);
3286 	VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3287 	/*
3288 	 * Take a local copy of the address_space - folio.mapping may be zeroed
3289 	 * by truncate after folio_unlock().   The address_space itself remains
3290 	 * pinned by vma->vm_file's reference.  We rely on folio_unlock()'s
3291 	 * release semantics to prevent the compiler from undoing this copying.
3292 	 */
3293 	mapping = folio_raw_mapping(folio);
3294 	folio_unlock(folio);
3295 
3296 	if (!page_mkwrite)
3297 		file_update_time(vma->vm_file);
3298 
3299 	/*
3300 	 * Throttle page dirtying rate down to writeback speed.
3301 	 *
3302 	 * mapping may be NULL here because some device drivers do not
3303 	 * set page.mapping but still dirty their pages
3304 	 *
3305 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3306 	 * is pinning the mapping, as per above.
3307 	 */
3308 	if ((dirtied || page_mkwrite) && mapping) {
3309 		struct file *fpin;
3310 
3311 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3312 		balance_dirty_pages_ratelimited(mapping);
3313 		if (fpin) {
3314 			fput(fpin);
3315 			return VM_FAULT_COMPLETED;
3316 		}
3317 	}
3318 
3319 	return 0;
3320 }
3321 
3322 /*
3323  * Handle write page faults for pages that can be reused in the current vma
3324  *
3325  * This can happen either due to the mapping being with the VM_SHARED flag,
3326  * or due to us being the last reference standing to the page. In either
3327  * case, all we need to do here is to mark the page as writable and update
3328  * any related book-keeping.
3329  */
3330 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3331 	__releases(vmf->ptl)
3332 {
3333 	struct vm_area_struct *vma = vmf->vma;
3334 	pte_t entry;
3335 
3336 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3337 	VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
3338 
3339 	if (folio) {
3340 		VM_BUG_ON(folio_test_anon(folio) &&
3341 			  !PageAnonExclusive(vmf->page));
3342 		/*
3343 		 * Clear the folio's cpupid information as the existing
3344 		 * information potentially belongs to a now completely
3345 		 * unrelated process.
3346 		 */
3347 		folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3348 	}
3349 
3350 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3351 	entry = pte_mkyoung(vmf->orig_pte);
3352 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3353 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3354 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3355 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3356 	count_vm_event(PGREUSE);
3357 }
3358 
3359 /*
3360  * We could add a bitflag somewhere, but for now, we know that all
3361  * vm_ops that have a ->map_pages have been audited and don't need
3362  * the mmap_lock to be held.
3363  */
3364 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3365 {
3366 	struct vm_area_struct *vma = vmf->vma;
3367 
3368 	if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3369 		return 0;
3370 	vma_end_read(vma);
3371 	return VM_FAULT_RETRY;
3372 }
3373 
3374 /**
3375  * __vmf_anon_prepare - Prepare to handle an anonymous fault.
3376  * @vmf: The vm_fault descriptor passed from the fault handler.
3377  *
3378  * When preparing to insert an anonymous page into a VMA from a
3379  * fault handler, call this function rather than anon_vma_prepare().
3380  * If this vma does not already have an associated anon_vma and we are
3381  * only protected by the per-VMA lock, the caller must retry with the
3382  * mmap_lock held.  __anon_vma_prepare() will look at adjacent VMAs to
3383  * determine if this VMA can share its anon_vma, and that's not safe to
3384  * do with only the per-VMA lock held for this VMA.
3385  *
3386  * Return: 0 if fault handling can proceed.  Any other value should be
3387  * returned to the caller.
3388  */
3389 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
3390 {
3391 	struct vm_area_struct *vma = vmf->vma;
3392 	vm_fault_t ret = 0;
3393 
3394 	if (likely(vma->anon_vma))
3395 		return 0;
3396 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3397 		if (!mmap_read_trylock(vma->vm_mm))
3398 			return VM_FAULT_RETRY;
3399 	}
3400 	if (__anon_vma_prepare(vma))
3401 		ret = VM_FAULT_OOM;
3402 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3403 		mmap_read_unlock(vma->vm_mm);
3404 	return ret;
3405 }
3406 
3407 /*
3408  * Handle the case of a page which we actually need to copy to a new page,
3409  * either due to COW or unsharing.
3410  *
3411  * Called with mmap_lock locked and the old page referenced, but
3412  * without the ptl held.
3413  *
3414  * High level logic flow:
3415  *
3416  * - Allocate a page, copy the content of the old page to the new one.
3417  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3418  * - Take the PTL. If the pte changed, bail out and release the allocated page
3419  * - If the pte is still the way we remember it, update the page table and all
3420  *   relevant references. This includes dropping the reference the page-table
3421  *   held to the old page, as well as updating the rmap.
3422  * - In any case, unlock the PTL and drop the reference we took to the old page.
3423  */
3424 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3425 {
3426 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3427 	struct vm_area_struct *vma = vmf->vma;
3428 	struct mm_struct *mm = vma->vm_mm;
3429 	struct folio *old_folio = NULL;
3430 	struct folio *new_folio = NULL;
3431 	pte_t entry;
3432 	int page_copied = 0;
3433 	struct mmu_notifier_range range;
3434 	vm_fault_t ret;
3435 	bool pfn_is_zero;
3436 
3437 	delayacct_wpcopy_start();
3438 
3439 	if (vmf->page)
3440 		old_folio = page_folio(vmf->page);
3441 	ret = vmf_anon_prepare(vmf);
3442 	if (unlikely(ret))
3443 		goto out;
3444 
3445 	pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3446 	new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3447 	if (!new_folio)
3448 		goto oom;
3449 
3450 	if (!pfn_is_zero) {
3451 		int err;
3452 
3453 		err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3454 		if (err) {
3455 			/*
3456 			 * COW failed, if the fault was solved by other,
3457 			 * it's fine. If not, userspace would re-fault on
3458 			 * the same address and we will handle the fault
3459 			 * from the second attempt.
3460 			 * The -EHWPOISON case will not be retried.
3461 			 */
3462 			folio_put(new_folio);
3463 			if (old_folio)
3464 				folio_put(old_folio);
3465 
3466 			delayacct_wpcopy_end();
3467 			return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3468 		}
3469 		kmsan_copy_page_meta(&new_folio->page, vmf->page);
3470 	}
3471 
3472 	__folio_mark_uptodate(new_folio);
3473 
3474 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3475 				vmf->address & PAGE_MASK,
3476 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3477 	mmu_notifier_invalidate_range_start(&range);
3478 
3479 	/*
3480 	 * Re-check the pte - we dropped the lock
3481 	 */
3482 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3483 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3484 		if (old_folio) {
3485 			if (!folio_test_anon(old_folio)) {
3486 				dec_mm_counter(mm, mm_counter_file(old_folio));
3487 				inc_mm_counter(mm, MM_ANONPAGES);
3488 			}
3489 		} else {
3490 			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3491 			inc_mm_counter(mm, MM_ANONPAGES);
3492 		}
3493 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3494 		entry = mk_pte(&new_folio->page, vma->vm_page_prot);
3495 		entry = pte_sw_mkyoung(entry);
3496 		if (unlikely(unshare)) {
3497 			if (pte_soft_dirty(vmf->orig_pte))
3498 				entry = pte_mksoft_dirty(entry);
3499 			if (pte_uffd_wp(vmf->orig_pte))
3500 				entry = pte_mkuffd_wp(entry);
3501 		} else {
3502 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3503 		}
3504 
3505 		/*
3506 		 * Clear the pte entry and flush it first, before updating the
3507 		 * pte with the new entry, to keep TLBs on different CPUs in
3508 		 * sync. This code used to set the new PTE then flush TLBs, but
3509 		 * that left a window where the new PTE could be loaded into
3510 		 * some TLBs while the old PTE remains in others.
3511 		 */
3512 		ptep_clear_flush(vma, vmf->address, vmf->pte);
3513 		folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
3514 		folio_add_lru_vma(new_folio, vma);
3515 		BUG_ON(unshare && pte_write(entry));
3516 		set_pte_at(mm, vmf->address, vmf->pte, entry);
3517 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3518 		if (old_folio) {
3519 			/*
3520 			 * Only after switching the pte to the new page may
3521 			 * we remove the mapcount here. Otherwise another
3522 			 * process may come and find the rmap count decremented
3523 			 * before the pte is switched to the new page, and
3524 			 * "reuse" the old page writing into it while our pte
3525 			 * here still points into it and can be read by other
3526 			 * threads.
3527 			 *
3528 			 * The critical issue is to order this
3529 			 * folio_remove_rmap_pte() with the ptp_clear_flush
3530 			 * above. Those stores are ordered by (if nothing else,)
3531 			 * the barrier present in the atomic_add_negative
3532 			 * in folio_remove_rmap_pte();
3533 			 *
3534 			 * Then the TLB flush in ptep_clear_flush ensures that
3535 			 * no process can access the old page before the
3536 			 * decremented mapcount is visible. And the old page
3537 			 * cannot be reused until after the decremented
3538 			 * mapcount is visible. So transitively, TLBs to
3539 			 * old page will be flushed before it can be reused.
3540 			 */
3541 			folio_remove_rmap_pte(old_folio, vmf->page, vma);
3542 		}
3543 
3544 		/* Free the old page.. */
3545 		new_folio = old_folio;
3546 		page_copied = 1;
3547 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3548 	} else if (vmf->pte) {
3549 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3550 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3551 	}
3552 
3553 	mmu_notifier_invalidate_range_end(&range);
3554 
3555 	if (new_folio)
3556 		folio_put(new_folio);
3557 	if (old_folio) {
3558 		if (page_copied)
3559 			free_swap_cache(old_folio);
3560 		folio_put(old_folio);
3561 	}
3562 
3563 	delayacct_wpcopy_end();
3564 	return 0;
3565 oom:
3566 	ret = VM_FAULT_OOM;
3567 out:
3568 	if (old_folio)
3569 		folio_put(old_folio);
3570 
3571 	delayacct_wpcopy_end();
3572 	return ret;
3573 }
3574 
3575 /**
3576  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3577  *			  writeable once the page is prepared
3578  *
3579  * @vmf: structure describing the fault
3580  * @folio: the folio of vmf->page
3581  *
3582  * This function handles all that is needed to finish a write page fault in a
3583  * shared mapping due to PTE being read-only once the mapped page is prepared.
3584  * It handles locking of PTE and modifying it.
3585  *
3586  * The function expects the page to be locked or other protection against
3587  * concurrent faults / writeback (such as DAX radix tree locks).
3588  *
3589  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3590  * we acquired PTE lock.
3591  */
3592 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
3593 {
3594 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3595 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3596 				       &vmf->ptl);
3597 	if (!vmf->pte)
3598 		return VM_FAULT_NOPAGE;
3599 	/*
3600 	 * We might have raced with another page fault while we released the
3601 	 * pte_offset_map_lock.
3602 	 */
3603 	if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3604 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3605 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3606 		return VM_FAULT_NOPAGE;
3607 	}
3608 	wp_page_reuse(vmf, folio);
3609 	return 0;
3610 }
3611 
3612 /*
3613  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3614  * mapping
3615  */
3616 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3617 {
3618 	struct vm_area_struct *vma = vmf->vma;
3619 
3620 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3621 		vm_fault_t ret;
3622 
3623 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3624 		ret = vmf_can_call_fault(vmf);
3625 		if (ret)
3626 			return ret;
3627 
3628 		vmf->flags |= FAULT_FLAG_MKWRITE;
3629 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3630 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3631 			return ret;
3632 		return finish_mkwrite_fault(vmf, NULL);
3633 	}
3634 	wp_page_reuse(vmf, NULL);
3635 	return 0;
3636 }
3637 
3638 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3639 	__releases(vmf->ptl)
3640 {
3641 	struct vm_area_struct *vma = vmf->vma;
3642 	vm_fault_t ret = 0;
3643 
3644 	folio_get(folio);
3645 
3646 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3647 		vm_fault_t tmp;
3648 
3649 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3650 		tmp = vmf_can_call_fault(vmf);
3651 		if (tmp) {
3652 			folio_put(folio);
3653 			return tmp;
3654 		}
3655 
3656 		tmp = do_page_mkwrite(vmf, folio);
3657 		if (unlikely(!tmp || (tmp &
3658 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3659 			folio_put(folio);
3660 			return tmp;
3661 		}
3662 		tmp = finish_mkwrite_fault(vmf, folio);
3663 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3664 			folio_unlock(folio);
3665 			folio_put(folio);
3666 			return tmp;
3667 		}
3668 	} else {
3669 		wp_page_reuse(vmf, folio);
3670 		folio_lock(folio);
3671 	}
3672 	ret |= fault_dirty_shared_page(vmf);
3673 	folio_put(folio);
3674 
3675 	return ret;
3676 }
3677 
3678 static bool wp_can_reuse_anon_folio(struct folio *folio,
3679 				    struct vm_area_struct *vma)
3680 {
3681 	/*
3682 	 * We could currently only reuse a subpage of a large folio if no
3683 	 * other subpages of the large folios are still mapped. However,
3684 	 * let's just consistently not reuse subpages even if we could
3685 	 * reuse in that scenario, and give back a large folio a bit
3686 	 * sooner.
3687 	 */
3688 	if (folio_test_large(folio))
3689 		return false;
3690 
3691 	/*
3692 	 * We have to verify under folio lock: these early checks are
3693 	 * just an optimization to avoid locking the folio and freeing
3694 	 * the swapcache if there is little hope that we can reuse.
3695 	 *
3696 	 * KSM doesn't necessarily raise the folio refcount.
3697 	 */
3698 	if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3699 		return false;
3700 	if (!folio_test_lru(folio))
3701 		/*
3702 		 * We cannot easily detect+handle references from
3703 		 * remote LRU caches or references to LRU folios.
3704 		 */
3705 		lru_add_drain();
3706 	if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3707 		return false;
3708 	if (!folio_trylock(folio))
3709 		return false;
3710 	if (folio_test_swapcache(folio))
3711 		folio_free_swap(folio);
3712 	if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3713 		folio_unlock(folio);
3714 		return false;
3715 	}
3716 	/*
3717 	 * Ok, we've got the only folio reference from our mapping
3718 	 * and the folio is locked, it's dark out, and we're wearing
3719 	 * sunglasses. Hit it.
3720 	 */
3721 	folio_move_anon_rmap(folio, vma);
3722 	folio_unlock(folio);
3723 	return true;
3724 }
3725 
3726 /*
3727  * This routine handles present pages, when
3728  * * users try to write to a shared page (FAULT_FLAG_WRITE)
3729  * * GUP wants to take a R/O pin on a possibly shared anonymous page
3730  *   (FAULT_FLAG_UNSHARE)
3731  *
3732  * It is done by copying the page to a new address and decrementing the
3733  * shared-page counter for the old page.
3734  *
3735  * Note that this routine assumes that the protection checks have been
3736  * done by the caller (the low-level page fault routine in most cases).
3737  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3738  * done any necessary COW.
3739  *
3740  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3741  * though the page will change only once the write actually happens. This
3742  * avoids a few races, and potentially makes it more efficient.
3743  *
3744  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3745  * but allow concurrent faults), with pte both mapped and locked.
3746  * We return with mmap_lock still held, but pte unmapped and unlocked.
3747  */
3748 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3749 	__releases(vmf->ptl)
3750 {
3751 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3752 	struct vm_area_struct *vma = vmf->vma;
3753 	struct folio *folio = NULL;
3754 	pte_t pte;
3755 
3756 	if (likely(!unshare)) {
3757 		if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3758 			if (!userfaultfd_wp_async(vma)) {
3759 				pte_unmap_unlock(vmf->pte, vmf->ptl);
3760 				return handle_userfault(vmf, VM_UFFD_WP);
3761 			}
3762 
3763 			/*
3764 			 * Nothing needed (cache flush, TLB invalidations,
3765 			 * etc.) because we're only removing the uffd-wp bit,
3766 			 * which is completely invisible to the user.
3767 			 */
3768 			pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
3769 
3770 			set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3771 			/*
3772 			 * Update this to be prepared for following up CoW
3773 			 * handling
3774 			 */
3775 			vmf->orig_pte = pte;
3776 		}
3777 
3778 		/*
3779 		 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3780 		 * is flushed in this case before copying.
3781 		 */
3782 		if (unlikely(userfaultfd_wp(vmf->vma) &&
3783 			     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3784 			flush_tlb_page(vmf->vma, vmf->address);
3785 	}
3786 
3787 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3788 
3789 	if (vmf->page)
3790 		folio = page_folio(vmf->page);
3791 
3792 	/*
3793 	 * Shared mapping: we are guaranteed to have VM_WRITE and
3794 	 * FAULT_FLAG_WRITE set at this point.
3795 	 */
3796 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3797 		/*
3798 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3799 		 * VM_PFNMAP VMA.
3800 		 *
3801 		 * We should not cow pages in a shared writeable mapping.
3802 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3803 		 */
3804 		if (!vmf->page)
3805 			return wp_pfn_shared(vmf);
3806 		return wp_page_shared(vmf, folio);
3807 	}
3808 
3809 	/*
3810 	 * Private mapping: create an exclusive anonymous page copy if reuse
3811 	 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
3812 	 *
3813 	 * If we encounter a page that is marked exclusive, we must reuse
3814 	 * the page without further checks.
3815 	 */
3816 	if (folio && folio_test_anon(folio) &&
3817 	    (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
3818 		if (!PageAnonExclusive(vmf->page))
3819 			SetPageAnonExclusive(vmf->page);
3820 		if (unlikely(unshare)) {
3821 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3822 			return 0;
3823 		}
3824 		wp_page_reuse(vmf, folio);
3825 		return 0;
3826 	}
3827 	/*
3828 	 * Ok, we need to copy. Oh, well..
3829 	 */
3830 	if (folio)
3831 		folio_get(folio);
3832 
3833 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3834 #ifdef CONFIG_KSM
3835 	if (folio && folio_test_ksm(folio))
3836 		count_vm_event(COW_KSM);
3837 #endif
3838 	return wp_page_copy(vmf);
3839 }
3840 
3841 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3842 		unsigned long start_addr, unsigned long end_addr,
3843 		struct zap_details *details)
3844 {
3845 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3846 }
3847 
3848 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3849 					    pgoff_t first_index,
3850 					    pgoff_t last_index,
3851 					    struct zap_details *details)
3852 {
3853 	struct vm_area_struct *vma;
3854 	pgoff_t vba, vea, zba, zea;
3855 
3856 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
3857 		vba = vma->vm_pgoff;
3858 		vea = vba + vma_pages(vma) - 1;
3859 		zba = max(first_index, vba);
3860 		zea = min(last_index, vea);
3861 
3862 		unmap_mapping_range_vma(vma,
3863 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3864 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3865 				details);
3866 	}
3867 }
3868 
3869 /**
3870  * unmap_mapping_folio() - Unmap single folio from processes.
3871  * @folio: The locked folio to be unmapped.
3872  *
3873  * Unmap this folio from any userspace process which still has it mmaped.
3874  * Typically, for efficiency, the range of nearby pages has already been
3875  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3876  * truncation or invalidation holds the lock on a folio, it may find that
3877  * the page has been remapped again: and then uses unmap_mapping_folio()
3878  * to unmap it finally.
3879  */
3880 void unmap_mapping_folio(struct folio *folio)
3881 {
3882 	struct address_space *mapping = folio->mapping;
3883 	struct zap_details details = { };
3884 	pgoff_t	first_index;
3885 	pgoff_t	last_index;
3886 
3887 	VM_BUG_ON(!folio_test_locked(folio));
3888 
3889 	first_index = folio->index;
3890 	last_index = folio_next_index(folio) - 1;
3891 
3892 	details.even_cows = false;
3893 	details.single_folio = folio;
3894 	details.zap_flags = ZAP_FLAG_DROP_MARKER;
3895 
3896 	i_mmap_lock_read(mapping);
3897 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3898 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3899 					 last_index, &details);
3900 	i_mmap_unlock_read(mapping);
3901 }
3902 
3903 /**
3904  * unmap_mapping_pages() - Unmap pages from processes.
3905  * @mapping: The address space containing pages to be unmapped.
3906  * @start: Index of first page to be unmapped.
3907  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3908  * @even_cows: Whether to unmap even private COWed pages.
3909  *
3910  * Unmap the pages in this address space from any userspace process which
3911  * has them mmaped.  Generally, you want to remove COWed pages as well when
3912  * a file is being truncated, but not when invalidating pages from the page
3913  * cache.
3914  */
3915 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3916 		pgoff_t nr, bool even_cows)
3917 {
3918 	struct zap_details details = { };
3919 	pgoff_t	first_index = start;
3920 	pgoff_t	last_index = start + nr - 1;
3921 
3922 	details.even_cows = even_cows;
3923 	if (last_index < first_index)
3924 		last_index = ULONG_MAX;
3925 
3926 	i_mmap_lock_read(mapping);
3927 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3928 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3929 					 last_index, &details);
3930 	i_mmap_unlock_read(mapping);
3931 }
3932 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
3933 
3934 /**
3935  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3936  * address_space corresponding to the specified byte range in the underlying
3937  * file.
3938  *
3939  * @mapping: the address space containing mmaps to be unmapped.
3940  * @holebegin: byte in first page to unmap, relative to the start of
3941  * the underlying file.  This will be rounded down to a PAGE_SIZE
3942  * boundary.  Note that this is different from truncate_pagecache(), which
3943  * must keep the partial page.  In contrast, we must get rid of
3944  * partial pages.
3945  * @holelen: size of prospective hole in bytes.  This will be rounded
3946  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3947  * end of the file.
3948  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3949  * but 0 when invalidating pagecache, don't throw away private data.
3950  */
3951 void unmap_mapping_range(struct address_space *mapping,
3952 		loff_t const holebegin, loff_t const holelen, int even_cows)
3953 {
3954 	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3955 	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
3956 
3957 	/* Check for overflow. */
3958 	if (sizeof(holelen) > sizeof(hlen)) {
3959 		long long holeend =
3960 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3961 		if (holeend & ~(long long)ULONG_MAX)
3962 			hlen = ULONG_MAX - hba + 1;
3963 	}
3964 
3965 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3966 }
3967 EXPORT_SYMBOL(unmap_mapping_range);
3968 
3969 /*
3970  * Restore a potential device exclusive pte to a working pte entry
3971  */
3972 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3973 {
3974 	struct folio *folio = page_folio(vmf->page);
3975 	struct vm_area_struct *vma = vmf->vma;
3976 	struct mmu_notifier_range range;
3977 	vm_fault_t ret;
3978 
3979 	/*
3980 	 * We need a reference to lock the folio because we don't hold
3981 	 * the PTL so a racing thread can remove the device-exclusive
3982 	 * entry and unmap it. If the folio is free the entry must
3983 	 * have been removed already. If it happens to have already
3984 	 * been re-allocated after being freed all we do is lock and
3985 	 * unlock it.
3986 	 */
3987 	if (!folio_try_get(folio))
3988 		return 0;
3989 
3990 	ret = folio_lock_or_retry(folio, vmf);
3991 	if (ret) {
3992 		folio_put(folio);
3993 		return ret;
3994 	}
3995 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3996 				vma->vm_mm, vmf->address & PAGE_MASK,
3997 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3998 	mmu_notifier_invalidate_range_start(&range);
3999 
4000 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4001 				&vmf->ptl);
4002 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4003 		restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
4004 
4005 	if (vmf->pte)
4006 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4007 	folio_unlock(folio);
4008 	folio_put(folio);
4009 
4010 	mmu_notifier_invalidate_range_end(&range);
4011 	return 0;
4012 }
4013 
4014 static inline bool should_try_to_free_swap(struct folio *folio,
4015 					   struct vm_area_struct *vma,
4016 					   unsigned int fault_flags)
4017 {
4018 	if (!folio_test_swapcache(folio))
4019 		return false;
4020 	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
4021 	    folio_test_mlocked(folio))
4022 		return true;
4023 	/*
4024 	 * If we want to map a page that's in the swapcache writable, we
4025 	 * have to detect via the refcount if we're really the exclusive
4026 	 * user. Try freeing the swapcache to get rid of the swapcache
4027 	 * reference only in case it's likely that we'll be the exlusive user.
4028 	 */
4029 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
4030 		folio_ref_count(folio) == (1 + folio_nr_pages(folio));
4031 }
4032 
4033 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
4034 {
4035 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4036 				       vmf->address, &vmf->ptl);
4037 	if (!vmf->pte)
4038 		return 0;
4039 	/*
4040 	 * Be careful so that we will only recover a special uffd-wp pte into a
4041 	 * none pte.  Otherwise it means the pte could have changed, so retry.
4042 	 *
4043 	 * This should also cover the case where e.g. the pte changed
4044 	 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
4045 	 * So is_pte_marker() check is not enough to safely drop the pte.
4046 	 */
4047 	if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
4048 		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
4049 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4050 	return 0;
4051 }
4052 
4053 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
4054 {
4055 	if (vma_is_anonymous(vmf->vma))
4056 		return do_anonymous_page(vmf);
4057 	else
4058 		return do_fault(vmf);
4059 }
4060 
4061 /*
4062  * This is actually a page-missing access, but with uffd-wp special pte
4063  * installed.  It means this pte was wr-protected before being unmapped.
4064  */
4065 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
4066 {
4067 	/*
4068 	 * Just in case there're leftover special ptes even after the region
4069 	 * got unregistered - we can simply clear them.
4070 	 */
4071 	if (unlikely(!userfaultfd_wp(vmf->vma)))
4072 		return pte_marker_clear(vmf);
4073 
4074 	return do_pte_missing(vmf);
4075 }
4076 
4077 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
4078 {
4079 	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
4080 	unsigned long marker = pte_marker_get(entry);
4081 
4082 	/*
4083 	 * PTE markers should never be empty.  If anything weird happened,
4084 	 * the best thing to do is to kill the process along with its mm.
4085 	 */
4086 	if (WARN_ON_ONCE(!marker))
4087 		return VM_FAULT_SIGBUS;
4088 
4089 	/* Higher priority than uffd-wp when data corrupted */
4090 	if (marker & PTE_MARKER_POISONED)
4091 		return VM_FAULT_HWPOISON;
4092 
4093 	/* Hitting a guard page is always a fatal condition. */
4094 	if (marker & PTE_MARKER_GUARD)
4095 		return VM_FAULT_SIGSEGV;
4096 
4097 	if (pte_marker_entry_uffd_wp(entry))
4098 		return pte_marker_handle_uffd_wp(vmf);
4099 
4100 	/* This is an unknown pte marker */
4101 	return VM_FAULT_SIGBUS;
4102 }
4103 
4104 static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
4105 {
4106 	struct vm_area_struct *vma = vmf->vma;
4107 	struct folio *folio;
4108 	swp_entry_t entry;
4109 
4110 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
4111 	if (!folio)
4112 		return NULL;
4113 
4114 	entry = pte_to_swp_entry(vmf->orig_pte);
4115 	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4116 					   GFP_KERNEL, entry)) {
4117 		folio_put(folio);
4118 		return NULL;
4119 	}
4120 
4121 	return folio;
4122 }
4123 
4124 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4125 static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
4126 {
4127 	struct swap_info_struct *si = swp_swap_info(entry);
4128 	pgoff_t offset = swp_offset(entry);
4129 	int i;
4130 
4131 	/*
4132 	 * While allocating a large folio and doing swap_read_folio, which is
4133 	 * the case the being faulted pte doesn't have swapcache. We need to
4134 	 * ensure all PTEs have no cache as well, otherwise, we might go to
4135 	 * swap devices while the content is in swapcache.
4136 	 */
4137 	for (i = 0; i < max_nr; i++) {
4138 		if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
4139 			return i;
4140 	}
4141 
4142 	return i;
4143 }
4144 
4145 /*
4146  * Check if the PTEs within a range are contiguous swap entries
4147  * and have consistent swapcache, zeromap.
4148  */
4149 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
4150 {
4151 	unsigned long addr;
4152 	swp_entry_t entry;
4153 	int idx;
4154 	pte_t pte;
4155 
4156 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4157 	idx = (vmf->address - addr) / PAGE_SIZE;
4158 	pte = ptep_get(ptep);
4159 
4160 	if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
4161 		return false;
4162 	entry = pte_to_swp_entry(pte);
4163 	if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
4164 		return false;
4165 
4166 	/*
4167 	 * swap_read_folio() can't handle the case a large folio is hybridly
4168 	 * from different backends. And they are likely corner cases. Similar
4169 	 * things might be added once zswap support large folios.
4170 	 */
4171 	if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
4172 		return false;
4173 	if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
4174 		return false;
4175 
4176 	return true;
4177 }
4178 
4179 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
4180 						     unsigned long addr,
4181 						     unsigned long orders)
4182 {
4183 	int order, nr;
4184 
4185 	order = highest_order(orders);
4186 
4187 	/*
4188 	 * To swap in a THP with nr pages, we require that its first swap_offset
4189 	 * is aligned with that number, as it was when the THP was swapped out.
4190 	 * This helps filter out most invalid entries.
4191 	 */
4192 	while (orders) {
4193 		nr = 1 << order;
4194 		if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
4195 			break;
4196 		order = next_order(&orders, order);
4197 	}
4198 
4199 	return orders;
4200 }
4201 
4202 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4203 {
4204 	struct vm_area_struct *vma = vmf->vma;
4205 	unsigned long orders;
4206 	struct folio *folio;
4207 	unsigned long addr;
4208 	swp_entry_t entry;
4209 	spinlock_t *ptl;
4210 	pte_t *pte;
4211 	gfp_t gfp;
4212 	int order;
4213 
4214 	/*
4215 	 * If uffd is active for the vma we need per-page fault fidelity to
4216 	 * maintain the uffd semantics.
4217 	 */
4218 	if (unlikely(userfaultfd_armed(vma)))
4219 		goto fallback;
4220 
4221 	/*
4222 	 * A large swapped out folio could be partially or fully in zswap. We
4223 	 * lack handling for such cases, so fallback to swapping in order-0
4224 	 * folio.
4225 	 */
4226 	if (!zswap_never_enabled())
4227 		goto fallback;
4228 
4229 	entry = pte_to_swp_entry(vmf->orig_pte);
4230 	/*
4231 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4232 	 * and suitable for swapping THP.
4233 	 */
4234 	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4235 			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4236 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4237 	orders = thp_swap_suitable_orders(swp_offset(entry),
4238 					  vmf->address, orders);
4239 
4240 	if (!orders)
4241 		goto fallback;
4242 
4243 	pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4244 				  vmf->address & PMD_MASK, &ptl);
4245 	if (unlikely(!pte))
4246 		goto fallback;
4247 
4248 	/*
4249 	 * For do_swap_page, find the highest order where the aligned range is
4250 	 * completely swap entries with contiguous swap offsets.
4251 	 */
4252 	order = highest_order(orders);
4253 	while (orders) {
4254 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4255 		if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
4256 			break;
4257 		order = next_order(&orders, order);
4258 	}
4259 
4260 	pte_unmap_unlock(pte, ptl);
4261 
4262 	/* Try allocating the highest of the remaining orders. */
4263 	gfp = vma_thp_gfp_mask(vma);
4264 	while (orders) {
4265 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4266 		folio = vma_alloc_folio(gfp, order, vma, addr);
4267 		if (folio) {
4268 			if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4269 							    gfp, entry))
4270 				return folio;
4271 			count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
4272 			folio_put(folio);
4273 		}
4274 		count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
4275 		order = next_order(&orders, order);
4276 	}
4277 
4278 fallback:
4279 	return __alloc_swap_folio(vmf);
4280 }
4281 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4282 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4283 {
4284 	return __alloc_swap_folio(vmf);
4285 }
4286 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4287 
4288 static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
4289 
4290 /*
4291  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4292  * but allow concurrent faults), and pte mapped but not yet locked.
4293  * We return with pte unmapped and unlocked.
4294  *
4295  * We return with the mmap_lock locked or unlocked in the same cases
4296  * as does filemap_fault().
4297  */
4298 vm_fault_t do_swap_page(struct vm_fault *vmf)
4299 {
4300 	struct vm_area_struct *vma = vmf->vma;
4301 	struct folio *swapcache, *folio = NULL;
4302 	DECLARE_WAITQUEUE(wait, current);
4303 	struct page *page;
4304 	struct swap_info_struct *si = NULL;
4305 	rmap_t rmap_flags = RMAP_NONE;
4306 	bool need_clear_cache = false;
4307 	bool exclusive = false;
4308 	swp_entry_t entry;
4309 	pte_t pte;
4310 	vm_fault_t ret = 0;
4311 	void *shadow = NULL;
4312 	int nr_pages;
4313 	unsigned long page_idx;
4314 	unsigned long address;
4315 	pte_t *ptep;
4316 
4317 	if (!pte_unmap_same(vmf))
4318 		goto out;
4319 
4320 	entry = pte_to_swp_entry(vmf->orig_pte);
4321 	if (unlikely(non_swap_entry(entry))) {
4322 		if (is_migration_entry(entry)) {
4323 			migration_entry_wait(vma->vm_mm, vmf->pmd,
4324 					     vmf->address);
4325 		} else if (is_device_exclusive_entry(entry)) {
4326 			vmf->page = pfn_swap_entry_to_page(entry);
4327 			ret = remove_device_exclusive_entry(vmf);
4328 		} else if (is_device_private_entry(entry)) {
4329 			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4330 				/*
4331 				 * migrate_to_ram is not yet ready to operate
4332 				 * under VMA lock.
4333 				 */
4334 				vma_end_read(vma);
4335 				ret = VM_FAULT_RETRY;
4336 				goto out;
4337 			}
4338 
4339 			vmf->page = pfn_swap_entry_to_page(entry);
4340 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4341 					vmf->address, &vmf->ptl);
4342 			if (unlikely(!vmf->pte ||
4343 				     !pte_same(ptep_get(vmf->pte),
4344 							vmf->orig_pte)))
4345 				goto unlock;
4346 
4347 			/*
4348 			 * Get a page reference while we know the page can't be
4349 			 * freed.
4350 			 */
4351 			get_page(vmf->page);
4352 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4353 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
4354 			put_page(vmf->page);
4355 		} else if (is_hwpoison_entry(entry)) {
4356 			ret = VM_FAULT_HWPOISON;
4357 		} else if (is_pte_marker_entry(entry)) {
4358 			ret = handle_pte_marker(vmf);
4359 		} else {
4360 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4361 			ret = VM_FAULT_SIGBUS;
4362 		}
4363 		goto out;
4364 	}
4365 
4366 	/* Prevent swapoff from happening to us. */
4367 	si = get_swap_device(entry);
4368 	if (unlikely(!si))
4369 		goto out;
4370 
4371 	folio = swap_cache_get_folio(entry, vma, vmf->address);
4372 	if (folio)
4373 		page = folio_file_page(folio, swp_offset(entry));
4374 	swapcache = folio;
4375 
4376 	if (!folio) {
4377 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
4378 		    __swap_count(entry) == 1) {
4379 			/* skip swapcache */
4380 			folio = alloc_swap_folio(vmf);
4381 			if (folio) {
4382 				__folio_set_locked(folio);
4383 				__folio_set_swapbacked(folio);
4384 
4385 				nr_pages = folio_nr_pages(folio);
4386 				if (folio_test_large(folio))
4387 					entry.val = ALIGN_DOWN(entry.val, nr_pages);
4388 				/*
4389 				 * Prevent parallel swapin from proceeding with
4390 				 * the cache flag. Otherwise, another thread
4391 				 * may finish swapin first, free the entry, and
4392 				 * swapout reusing the same entry. It's
4393 				 * undetectable as pte_same() returns true due
4394 				 * to entry reuse.
4395 				 */
4396 				if (swapcache_prepare(entry, nr_pages)) {
4397 					/*
4398 					 * Relax a bit to prevent rapid
4399 					 * repeated page faults.
4400 					 */
4401 					add_wait_queue(&swapcache_wq, &wait);
4402 					schedule_timeout_uninterruptible(1);
4403 					remove_wait_queue(&swapcache_wq, &wait);
4404 					goto out_page;
4405 				}
4406 				need_clear_cache = true;
4407 
4408 				mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
4409 
4410 				shadow = get_shadow_from_swap_cache(entry);
4411 				if (shadow)
4412 					workingset_refault(folio, shadow);
4413 
4414 				folio_add_lru(folio);
4415 
4416 				/* To provide entry to swap_read_folio() */
4417 				folio->swap = entry;
4418 				swap_read_folio(folio, NULL);
4419 				folio->private = NULL;
4420 			}
4421 		} else {
4422 			folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
4423 						vmf);
4424 			swapcache = folio;
4425 		}
4426 
4427 		if (!folio) {
4428 			/*
4429 			 * Back out if somebody else faulted in this pte
4430 			 * while we released the pte lock.
4431 			 */
4432 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4433 					vmf->address, &vmf->ptl);
4434 			if (likely(vmf->pte &&
4435 				   pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4436 				ret = VM_FAULT_OOM;
4437 			goto unlock;
4438 		}
4439 
4440 		/* Had to read the page from swap area: Major fault */
4441 		ret = VM_FAULT_MAJOR;
4442 		count_vm_event(PGMAJFAULT);
4443 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4444 		page = folio_file_page(folio, swp_offset(entry));
4445 	} else if (PageHWPoison(page)) {
4446 		/*
4447 		 * hwpoisoned dirty swapcache pages are kept for killing
4448 		 * owner processes (which may be unknown at hwpoison time)
4449 		 */
4450 		ret = VM_FAULT_HWPOISON;
4451 		goto out_release;
4452 	}
4453 
4454 	ret |= folio_lock_or_retry(folio, vmf);
4455 	if (ret & VM_FAULT_RETRY)
4456 		goto out_release;
4457 
4458 	if (swapcache) {
4459 		/*
4460 		 * Make sure folio_free_swap() or swapoff did not release the
4461 		 * swapcache from under us.  The page pin, and pte_same test
4462 		 * below, are not enough to exclude that.  Even if it is still
4463 		 * swapcache, we need to check that the page's swap has not
4464 		 * changed.
4465 		 */
4466 		if (unlikely(!folio_test_swapcache(folio) ||
4467 			     page_swap_entry(page).val != entry.val))
4468 			goto out_page;
4469 
4470 		/*
4471 		 * KSM sometimes has to copy on read faults, for example, if
4472 		 * page->index of !PageKSM() pages would be nonlinear inside the
4473 		 * anon VMA -- PageKSM() is lost on actual swapout.
4474 		 */
4475 		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
4476 		if (unlikely(!folio)) {
4477 			ret = VM_FAULT_OOM;
4478 			folio = swapcache;
4479 			goto out_page;
4480 		} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
4481 			ret = VM_FAULT_HWPOISON;
4482 			folio = swapcache;
4483 			goto out_page;
4484 		}
4485 		if (folio != swapcache)
4486 			page = folio_page(folio, 0);
4487 
4488 		/*
4489 		 * If we want to map a page that's in the swapcache writable, we
4490 		 * have to detect via the refcount if we're really the exclusive
4491 		 * owner. Try removing the extra reference from the local LRU
4492 		 * caches if required.
4493 		 */
4494 		if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
4495 		    !folio_test_ksm(folio) && !folio_test_lru(folio))
4496 			lru_add_drain();
4497 	}
4498 
4499 	folio_throttle_swaprate(folio, GFP_KERNEL);
4500 
4501 	/*
4502 	 * Back out if somebody else already faulted in this pte.
4503 	 */
4504 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4505 			&vmf->ptl);
4506 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4507 		goto out_nomap;
4508 
4509 	if (unlikely(!folio_test_uptodate(folio))) {
4510 		ret = VM_FAULT_SIGBUS;
4511 		goto out_nomap;
4512 	}
4513 
4514 	/* allocated large folios for SWP_SYNCHRONOUS_IO */
4515 	if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
4516 		unsigned long nr = folio_nr_pages(folio);
4517 		unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
4518 		unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
4519 		pte_t *folio_ptep = vmf->pte - idx;
4520 		pte_t folio_pte = ptep_get(folio_ptep);
4521 
4522 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4523 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4524 			goto out_nomap;
4525 
4526 		page_idx = idx;
4527 		address = folio_start;
4528 		ptep = folio_ptep;
4529 		goto check_folio;
4530 	}
4531 
4532 	nr_pages = 1;
4533 	page_idx = 0;
4534 	address = vmf->address;
4535 	ptep = vmf->pte;
4536 	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
4537 		int nr = folio_nr_pages(folio);
4538 		unsigned long idx = folio_page_idx(folio, page);
4539 		unsigned long folio_start = address - idx * PAGE_SIZE;
4540 		unsigned long folio_end = folio_start + nr * PAGE_SIZE;
4541 		pte_t *folio_ptep;
4542 		pte_t folio_pte;
4543 
4544 		if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
4545 			goto check_folio;
4546 		if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
4547 			goto check_folio;
4548 
4549 		folio_ptep = vmf->pte - idx;
4550 		folio_pte = ptep_get(folio_ptep);
4551 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4552 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4553 			goto check_folio;
4554 
4555 		page_idx = idx;
4556 		address = folio_start;
4557 		ptep = folio_ptep;
4558 		nr_pages = nr;
4559 		entry = folio->swap;
4560 		page = &folio->page;
4561 	}
4562 
4563 check_folio:
4564 	/*
4565 	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4566 	 * must never point at an anonymous page in the swapcache that is
4567 	 * PG_anon_exclusive. Sanity check that this holds and especially, that
4568 	 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4569 	 * check after taking the PT lock and making sure that nobody
4570 	 * concurrently faulted in this page and set PG_anon_exclusive.
4571 	 */
4572 	BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
4573 	BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
4574 
4575 	/*
4576 	 * Check under PT lock (to protect against concurrent fork() sharing
4577 	 * the swap entry concurrently) for certainly exclusive pages.
4578 	 */
4579 	if (!folio_test_ksm(folio)) {
4580 		exclusive = pte_swp_exclusive(vmf->orig_pte);
4581 		if (folio != swapcache) {
4582 			/*
4583 			 * We have a fresh page that is not exposed to the
4584 			 * swapcache -> certainly exclusive.
4585 			 */
4586 			exclusive = true;
4587 		} else if (exclusive && folio_test_writeback(folio) &&
4588 			  data_race(si->flags & SWP_STABLE_WRITES)) {
4589 			/*
4590 			 * This is tricky: not all swap backends support
4591 			 * concurrent page modifications while under writeback.
4592 			 *
4593 			 * So if we stumble over such a page in the swapcache
4594 			 * we must not set the page exclusive, otherwise we can
4595 			 * map it writable without further checks and modify it
4596 			 * while still under writeback.
4597 			 *
4598 			 * For these problematic swap backends, simply drop the
4599 			 * exclusive marker: this is perfectly fine as we start
4600 			 * writeback only if we fully unmapped the page and
4601 			 * there are no unexpected references on the page after
4602 			 * unmapping succeeded. After fully unmapped, no
4603 			 * further GUP references (FOLL_GET and FOLL_PIN) can
4604 			 * appear, so dropping the exclusive marker and mapping
4605 			 * it only R/O is fine.
4606 			 */
4607 			exclusive = false;
4608 		}
4609 	}
4610 
4611 	/*
4612 	 * Some architectures may have to restore extra metadata to the page
4613 	 * when reading from swap. This metadata may be indexed by swap entry
4614 	 * so this must be called before swap_free().
4615 	 */
4616 	arch_swap_restore(folio_swap(entry, folio), folio);
4617 
4618 	/*
4619 	 * Remove the swap entry and conditionally try to free up the swapcache.
4620 	 * We're already holding a reference on the page but haven't mapped it
4621 	 * yet.
4622 	 */
4623 	swap_free_nr(entry, nr_pages);
4624 	if (should_try_to_free_swap(folio, vma, vmf->flags))
4625 		folio_free_swap(folio);
4626 
4627 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4628 	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
4629 	pte = mk_pte(page, vma->vm_page_prot);
4630 	if (pte_swp_soft_dirty(vmf->orig_pte))
4631 		pte = pte_mksoft_dirty(pte);
4632 	if (pte_swp_uffd_wp(vmf->orig_pte))
4633 		pte = pte_mkuffd_wp(pte);
4634 
4635 	/*
4636 	 * Same logic as in do_wp_page(); however, optimize for pages that are
4637 	 * certainly not shared either because we just allocated them without
4638 	 * exposing them to the swapcache or because the swap entry indicates
4639 	 * exclusivity.
4640 	 */
4641 	if (!folio_test_ksm(folio) &&
4642 	    (exclusive || folio_ref_count(folio) == 1)) {
4643 		if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
4644 		    !pte_needs_soft_dirty_wp(vma, pte)) {
4645 			pte = pte_mkwrite(pte, vma);
4646 			if (vmf->flags & FAULT_FLAG_WRITE) {
4647 				pte = pte_mkdirty(pte);
4648 				vmf->flags &= ~FAULT_FLAG_WRITE;
4649 			}
4650 		}
4651 		rmap_flags |= RMAP_EXCLUSIVE;
4652 	}
4653 	folio_ref_add(folio, nr_pages - 1);
4654 	flush_icache_pages(vma, page, nr_pages);
4655 	vmf->orig_pte = pte_advance_pfn(pte, page_idx);
4656 
4657 	/* ksm created a completely new copy */
4658 	if (unlikely(folio != swapcache && swapcache)) {
4659 		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
4660 		folio_add_lru_vma(folio, vma);
4661 	} else if (!folio_test_anon(folio)) {
4662 		/*
4663 		 * We currently only expect small !anon folios which are either
4664 		 * fully exclusive or fully shared, or new allocated large
4665 		 * folios which are fully exclusive. If we ever get large
4666 		 * folios within swapcache here, we have to be careful.
4667 		 */
4668 		VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio));
4669 		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
4670 		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
4671 	} else {
4672 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
4673 					rmap_flags);
4674 	}
4675 
4676 	VM_BUG_ON(!folio_test_anon(folio) ||
4677 			(pte_write(pte) && !PageAnonExclusive(page)));
4678 	set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
4679 	arch_do_swap_page_nr(vma->vm_mm, vma, address,
4680 			pte, pte, nr_pages);
4681 
4682 	folio_unlock(folio);
4683 	if (folio != swapcache && swapcache) {
4684 		/*
4685 		 * Hold the lock to avoid the swap entry to be reused
4686 		 * until we take the PT lock for the pte_same() check
4687 		 * (to avoid false positives from pte_same). For
4688 		 * further safety release the lock after the swap_free
4689 		 * so that the swap count won't change under a
4690 		 * parallel locked swapcache.
4691 		 */
4692 		folio_unlock(swapcache);
4693 		folio_put(swapcache);
4694 	}
4695 
4696 	if (vmf->flags & FAULT_FLAG_WRITE) {
4697 		ret |= do_wp_page(vmf);
4698 		if (ret & VM_FAULT_ERROR)
4699 			ret &= VM_FAULT_ERROR;
4700 		goto out;
4701 	}
4702 
4703 	/* No need to invalidate - it was non-present before */
4704 	update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
4705 unlock:
4706 	if (vmf->pte)
4707 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4708 out:
4709 	/* Clear the swap cache pin for direct swapin after PTL unlock */
4710 	if (need_clear_cache) {
4711 		swapcache_clear(si, entry, nr_pages);
4712 		if (waitqueue_active(&swapcache_wq))
4713 			wake_up(&swapcache_wq);
4714 	}
4715 	if (si)
4716 		put_swap_device(si);
4717 	return ret;
4718 out_nomap:
4719 	if (vmf->pte)
4720 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4721 out_page:
4722 	folio_unlock(folio);
4723 out_release:
4724 	folio_put(folio);
4725 	if (folio != swapcache && swapcache) {
4726 		folio_unlock(swapcache);
4727 		folio_put(swapcache);
4728 	}
4729 	if (need_clear_cache) {
4730 		swapcache_clear(si, entry, nr_pages);
4731 		if (waitqueue_active(&swapcache_wq))
4732 			wake_up(&swapcache_wq);
4733 	}
4734 	if (si)
4735 		put_swap_device(si);
4736 	return ret;
4737 }
4738 
4739 static bool pte_range_none(pte_t *pte, int nr_pages)
4740 {
4741 	int i;
4742 
4743 	for (i = 0; i < nr_pages; i++) {
4744 		if (!pte_none(ptep_get_lockless(pte + i)))
4745 			return false;
4746 	}
4747 
4748 	return true;
4749 }
4750 
4751 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
4752 {
4753 	struct vm_area_struct *vma = vmf->vma;
4754 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4755 	unsigned long orders;
4756 	struct folio *folio;
4757 	unsigned long addr;
4758 	pte_t *pte;
4759 	gfp_t gfp;
4760 	int order;
4761 
4762 	/*
4763 	 * If uffd is active for the vma we need per-page fault fidelity to
4764 	 * maintain the uffd semantics.
4765 	 */
4766 	if (unlikely(userfaultfd_armed(vma)))
4767 		goto fallback;
4768 
4769 	/*
4770 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4771 	 * for this vma. Then filter out the orders that can't be allocated over
4772 	 * the faulting address and still be fully contained in the vma.
4773 	 */
4774 	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4775 			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4776 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4777 
4778 	if (!orders)
4779 		goto fallback;
4780 
4781 	pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
4782 	if (!pte)
4783 		return ERR_PTR(-EAGAIN);
4784 
4785 	/*
4786 	 * Find the highest order where the aligned range is completely
4787 	 * pte_none(). Note that all remaining orders will be completely
4788 	 * pte_none().
4789 	 */
4790 	order = highest_order(orders);
4791 	while (orders) {
4792 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4793 		if (pte_range_none(pte + pte_index(addr), 1 << order))
4794 			break;
4795 		order = next_order(&orders, order);
4796 	}
4797 
4798 	pte_unmap(pte);
4799 
4800 	if (!orders)
4801 		goto fallback;
4802 
4803 	/* Try allocating the highest of the remaining orders. */
4804 	gfp = vma_thp_gfp_mask(vma);
4805 	while (orders) {
4806 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4807 		folio = vma_alloc_folio(gfp, order, vma, addr);
4808 		if (folio) {
4809 			if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
4810 				count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
4811 				folio_put(folio);
4812 				goto next;
4813 			}
4814 			folio_throttle_swaprate(folio, gfp);
4815 			/*
4816 			 * When a folio is not zeroed during allocation
4817 			 * (__GFP_ZERO not used) or user folios require special
4818 			 * handling, folio_zero_user() is used to make sure
4819 			 * that the page corresponding to the faulting address
4820 			 * will be hot in the cache after zeroing.
4821 			 */
4822 			if (user_alloc_needs_zeroing())
4823 				folio_zero_user(folio, vmf->address);
4824 			return folio;
4825 		}
4826 next:
4827 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
4828 		order = next_order(&orders, order);
4829 	}
4830 
4831 fallback:
4832 #endif
4833 	return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
4834 }
4835 
4836 /*
4837  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4838  * but allow concurrent faults), and pte mapped but not yet locked.
4839  * We return with mmap_lock still held, but pte unmapped and unlocked.
4840  */
4841 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4842 {
4843 	struct vm_area_struct *vma = vmf->vma;
4844 	unsigned long addr = vmf->address;
4845 	struct folio *folio;
4846 	vm_fault_t ret = 0;
4847 	int nr_pages = 1;
4848 	pte_t entry;
4849 
4850 	/* File mapping without ->vm_ops ? */
4851 	if (vma->vm_flags & VM_SHARED)
4852 		return VM_FAULT_SIGBUS;
4853 
4854 	/*
4855 	 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4856 	 * be distinguished from a transient failure of pte_offset_map().
4857 	 */
4858 	if (pte_alloc(vma->vm_mm, vmf->pmd))
4859 		return VM_FAULT_OOM;
4860 
4861 	/* Use the zero-page for reads */
4862 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4863 			!mm_forbids_zeropage(vma->vm_mm)) {
4864 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4865 						vma->vm_page_prot));
4866 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4867 				vmf->address, &vmf->ptl);
4868 		if (!vmf->pte)
4869 			goto unlock;
4870 		if (vmf_pte_changed(vmf)) {
4871 			update_mmu_tlb(vma, vmf->address, vmf->pte);
4872 			goto unlock;
4873 		}
4874 		ret = check_stable_address_space(vma->vm_mm);
4875 		if (ret)
4876 			goto unlock;
4877 		/* Deliver the page fault to userland, check inside PT lock */
4878 		if (userfaultfd_missing(vma)) {
4879 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4880 			return handle_userfault(vmf, VM_UFFD_MISSING);
4881 		}
4882 		goto setpte;
4883 	}
4884 
4885 	/* Allocate our own private page. */
4886 	ret = vmf_anon_prepare(vmf);
4887 	if (ret)
4888 		return ret;
4889 	/* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
4890 	folio = alloc_anon_folio(vmf);
4891 	if (IS_ERR(folio))
4892 		return 0;
4893 	if (!folio)
4894 		goto oom;
4895 
4896 	nr_pages = folio_nr_pages(folio);
4897 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4898 
4899 	/*
4900 	 * The memory barrier inside __folio_mark_uptodate makes sure that
4901 	 * preceding stores to the page contents become visible before
4902 	 * the set_pte_at() write.
4903 	 */
4904 	__folio_mark_uptodate(folio);
4905 
4906 	entry = mk_pte(&folio->page, vma->vm_page_prot);
4907 	entry = pte_sw_mkyoung(entry);
4908 	if (vma->vm_flags & VM_WRITE)
4909 		entry = pte_mkwrite(pte_mkdirty(entry), vma);
4910 
4911 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
4912 	if (!vmf->pte)
4913 		goto release;
4914 	if (nr_pages == 1 && vmf_pte_changed(vmf)) {
4915 		update_mmu_tlb(vma, addr, vmf->pte);
4916 		goto release;
4917 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
4918 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
4919 		goto release;
4920 	}
4921 
4922 	ret = check_stable_address_space(vma->vm_mm);
4923 	if (ret)
4924 		goto release;
4925 
4926 	/* Deliver the page fault to userland, check inside PT lock */
4927 	if (userfaultfd_missing(vma)) {
4928 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4929 		folio_put(folio);
4930 		return handle_userfault(vmf, VM_UFFD_MISSING);
4931 	}
4932 
4933 	folio_ref_add(folio, nr_pages - 1);
4934 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4935 	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
4936 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
4937 	folio_add_lru_vma(folio, vma);
4938 setpte:
4939 	if (vmf_orig_pte_uffd_wp(vmf))
4940 		entry = pte_mkuffd_wp(entry);
4941 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
4942 
4943 	/* No need to invalidate - it was non-present before */
4944 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
4945 unlock:
4946 	if (vmf->pte)
4947 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4948 	return ret;
4949 release:
4950 	folio_put(folio);
4951 	goto unlock;
4952 oom:
4953 	return VM_FAULT_OOM;
4954 }
4955 
4956 /*
4957  * The mmap_lock must have been held on entry, and may have been
4958  * released depending on flags and vma->vm_ops->fault() return value.
4959  * See filemap_fault() and __lock_page_retry().
4960  */
4961 static vm_fault_t __do_fault(struct vm_fault *vmf)
4962 {
4963 	struct vm_area_struct *vma = vmf->vma;
4964 	struct folio *folio;
4965 	vm_fault_t ret;
4966 
4967 	/*
4968 	 * Preallocate pte before we take page_lock because this might lead to
4969 	 * deadlocks for memcg reclaim which waits for pages under writeback:
4970 	 *				lock_page(A)
4971 	 *				SetPageWriteback(A)
4972 	 *				unlock_page(A)
4973 	 * lock_page(B)
4974 	 *				lock_page(B)
4975 	 * pte_alloc_one
4976 	 *   shrink_folio_list
4977 	 *     wait_on_page_writeback(A)
4978 	 *				SetPageWriteback(B)
4979 	 *				unlock_page(B)
4980 	 *				# flush A, B to clear the writeback
4981 	 */
4982 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4983 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4984 		if (!vmf->prealloc_pte)
4985 			return VM_FAULT_OOM;
4986 	}
4987 
4988 	ret = vma->vm_ops->fault(vmf);
4989 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4990 			    VM_FAULT_DONE_COW)))
4991 		return ret;
4992 
4993 	folio = page_folio(vmf->page);
4994 	if (unlikely(PageHWPoison(vmf->page))) {
4995 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4996 		if (ret & VM_FAULT_LOCKED) {
4997 			if (page_mapped(vmf->page))
4998 				unmap_mapping_folio(folio);
4999 			/* Retry if a clean folio was removed from the cache. */
5000 			if (mapping_evict_folio(folio->mapping, folio))
5001 				poisonret = VM_FAULT_NOPAGE;
5002 			folio_unlock(folio);
5003 		}
5004 		folio_put(folio);
5005 		vmf->page = NULL;
5006 		return poisonret;
5007 	}
5008 
5009 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
5010 		folio_lock(folio);
5011 	else
5012 		VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
5013 
5014 	return ret;
5015 }
5016 
5017 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5018 static void deposit_prealloc_pte(struct vm_fault *vmf)
5019 {
5020 	struct vm_area_struct *vma = vmf->vma;
5021 
5022 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
5023 	/*
5024 	 * We are going to consume the prealloc table,
5025 	 * count that as nr_ptes.
5026 	 */
5027 	mm_inc_nr_ptes(vma->vm_mm);
5028 	vmf->prealloc_pte = NULL;
5029 }
5030 
5031 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5032 {
5033 	struct folio *folio = page_folio(page);
5034 	struct vm_area_struct *vma = vmf->vma;
5035 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5036 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5037 	pmd_t entry;
5038 	vm_fault_t ret = VM_FAULT_FALLBACK;
5039 
5040 	/*
5041 	 * It is too late to allocate a small folio, we already have a large
5042 	 * folio in the pagecache: especially s390 KVM cannot tolerate any
5043 	 * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any
5044 	 * PMD mappings if THPs are disabled.
5045 	 */
5046 	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags))
5047 		return ret;
5048 
5049 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
5050 		return ret;
5051 
5052 	if (folio_order(folio) != HPAGE_PMD_ORDER)
5053 		return ret;
5054 	page = &folio->page;
5055 
5056 	/*
5057 	 * Just backoff if any subpage of a THP is corrupted otherwise
5058 	 * the corrupted page may mapped by PMD silently to escape the
5059 	 * check.  This kind of THP just can be PTE mapped.  Access to
5060 	 * the corrupted subpage should trigger SIGBUS as expected.
5061 	 */
5062 	if (unlikely(folio_test_has_hwpoisoned(folio)))
5063 		return ret;
5064 
5065 	/*
5066 	 * Archs like ppc64 need additional space to store information
5067 	 * related to pte entry. Use the preallocated table for that.
5068 	 */
5069 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
5070 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5071 		if (!vmf->prealloc_pte)
5072 			return VM_FAULT_OOM;
5073 	}
5074 
5075 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
5076 	if (unlikely(!pmd_none(*vmf->pmd)))
5077 		goto out;
5078 
5079 	flush_icache_pages(vma, page, HPAGE_PMD_NR);
5080 
5081 	entry = mk_huge_pmd(page, vma->vm_page_prot);
5082 	if (write)
5083 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
5084 
5085 	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
5086 	folio_add_file_rmap_pmd(folio, page, vma);
5087 
5088 	/*
5089 	 * deposit and withdraw with pmd lock held
5090 	 */
5091 	if (arch_needs_pgtable_deposit())
5092 		deposit_prealloc_pte(vmf);
5093 
5094 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
5095 
5096 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
5097 
5098 	/* fault is handled */
5099 	ret = 0;
5100 	count_vm_event(THP_FILE_MAPPED);
5101 out:
5102 	spin_unlock(vmf->ptl);
5103 	return ret;
5104 }
5105 #else
5106 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5107 {
5108 	return VM_FAULT_FALLBACK;
5109 }
5110 #endif
5111 
5112 /**
5113  * set_pte_range - Set a range of PTEs to point to pages in a folio.
5114  * @vmf: Fault decription.
5115  * @folio: The folio that contains @page.
5116  * @page: The first page to create a PTE for.
5117  * @nr: The number of PTEs to create.
5118  * @addr: The first address to create a PTE for.
5119  */
5120 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
5121 		struct page *page, unsigned int nr, unsigned long addr)
5122 {
5123 	struct vm_area_struct *vma = vmf->vma;
5124 	bool write = vmf->flags & FAULT_FLAG_WRITE;
5125 	bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
5126 	pte_t entry;
5127 
5128 	flush_icache_pages(vma, page, nr);
5129 	entry = mk_pte(page, vma->vm_page_prot);
5130 
5131 	if (prefault && arch_wants_old_prefaulted_pte())
5132 		entry = pte_mkold(entry);
5133 	else
5134 		entry = pte_sw_mkyoung(entry);
5135 
5136 	if (write)
5137 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5138 	if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
5139 		entry = pte_mkuffd_wp(entry);
5140 	/* copy-on-write page */
5141 	if (write && !(vma->vm_flags & VM_SHARED)) {
5142 		VM_BUG_ON_FOLIO(nr != 1, folio);
5143 		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5144 		folio_add_lru_vma(folio, vma);
5145 	} else {
5146 		folio_add_file_rmap_ptes(folio, page, nr, vma);
5147 	}
5148 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
5149 
5150 	/* no need to invalidate: a not-present page won't be cached */
5151 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
5152 }
5153 
5154 static bool vmf_pte_changed(struct vm_fault *vmf)
5155 {
5156 	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
5157 		return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
5158 
5159 	return !pte_none(ptep_get(vmf->pte));
5160 }
5161 
5162 /**
5163  * finish_fault - finish page fault once we have prepared the page to fault
5164  *
5165  * @vmf: structure describing the fault
5166  *
5167  * This function handles all that is needed to finish a page fault once the
5168  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5169  * given page, adds reverse page mapping, handles memcg charges and LRU
5170  * addition.
5171  *
5172  * The function expects the page to be locked and on success it consumes a
5173  * reference of a page being mapped (for the PTE which maps it).
5174  *
5175  * Return: %0 on success, %VM_FAULT_ code in case of error.
5176  */
5177 vm_fault_t finish_fault(struct vm_fault *vmf)
5178 {
5179 	struct vm_area_struct *vma = vmf->vma;
5180 	struct page *page;
5181 	struct folio *folio;
5182 	vm_fault_t ret;
5183 	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
5184 		      !(vma->vm_flags & VM_SHARED);
5185 	int type, nr_pages;
5186 	unsigned long addr = vmf->address;
5187 
5188 	/* Did we COW the page? */
5189 	if (is_cow)
5190 		page = vmf->cow_page;
5191 	else
5192 		page = vmf->page;
5193 
5194 	/*
5195 	 * check even for read faults because we might have lost our CoWed
5196 	 * page
5197 	 */
5198 	if (!(vma->vm_flags & VM_SHARED)) {
5199 		ret = check_stable_address_space(vma->vm_mm);
5200 		if (ret)
5201 			return ret;
5202 	}
5203 
5204 	if (pmd_none(*vmf->pmd)) {
5205 		if (PageTransCompound(page)) {
5206 			ret = do_set_pmd(vmf, page);
5207 			if (ret != VM_FAULT_FALLBACK)
5208 				return ret;
5209 		}
5210 
5211 		if (vmf->prealloc_pte)
5212 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
5213 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
5214 			return VM_FAULT_OOM;
5215 	}
5216 
5217 	folio = page_folio(page);
5218 	nr_pages = folio_nr_pages(folio);
5219 
5220 	/*
5221 	 * Using per-page fault to maintain the uffd semantics, and same
5222 	 * approach also applies to non-anonymous-shmem faults to avoid
5223 	 * inflating the RSS of the process.
5224 	 */
5225 	if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
5226 		nr_pages = 1;
5227 	} else if (nr_pages > 1) {
5228 		pgoff_t idx = folio_page_idx(folio, page);
5229 		/* The page offset of vmf->address within the VMA. */
5230 		pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5231 		/* The index of the entry in the pagetable for fault page. */
5232 		pgoff_t pte_off = pte_index(vmf->address);
5233 
5234 		/*
5235 		 * Fallback to per-page fault in case the folio size in page
5236 		 * cache beyond the VMA limits and PMD pagetable limits.
5237 		 */
5238 		if (unlikely(vma_off < idx ||
5239 			    vma_off + (nr_pages - idx) > vma_pages(vma) ||
5240 			    pte_off < idx ||
5241 			    pte_off + (nr_pages - idx)  > PTRS_PER_PTE)) {
5242 			nr_pages = 1;
5243 		} else {
5244 			/* Now we can set mappings for the whole large folio. */
5245 			addr = vmf->address - idx * PAGE_SIZE;
5246 			page = &folio->page;
5247 		}
5248 	}
5249 
5250 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5251 				       addr, &vmf->ptl);
5252 	if (!vmf->pte)
5253 		return VM_FAULT_NOPAGE;
5254 
5255 	/* Re-check under ptl */
5256 	if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
5257 		update_mmu_tlb(vma, addr, vmf->pte);
5258 		ret = VM_FAULT_NOPAGE;
5259 		goto unlock;
5260 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5261 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
5262 		ret = VM_FAULT_NOPAGE;
5263 		goto unlock;
5264 	}
5265 
5266 	folio_ref_add(folio, nr_pages - 1);
5267 	set_pte_range(vmf, folio, page, nr_pages, addr);
5268 	type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
5269 	add_mm_counter(vma->vm_mm, type, nr_pages);
5270 	ret = 0;
5271 
5272 unlock:
5273 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5274 	return ret;
5275 }
5276 
5277 static unsigned long fault_around_pages __read_mostly =
5278 	65536 >> PAGE_SHIFT;
5279 
5280 #ifdef CONFIG_DEBUG_FS
5281 static int fault_around_bytes_get(void *data, u64 *val)
5282 {
5283 	*val = fault_around_pages << PAGE_SHIFT;
5284 	return 0;
5285 }
5286 
5287 /*
5288  * fault_around_bytes must be rounded down to the nearest page order as it's
5289  * what do_fault_around() expects to see.
5290  */
5291 static int fault_around_bytes_set(void *data, u64 val)
5292 {
5293 	if (val / PAGE_SIZE > PTRS_PER_PTE)
5294 		return -EINVAL;
5295 
5296 	/*
5297 	 * The minimum value is 1 page, however this results in no fault-around
5298 	 * at all. See should_fault_around().
5299 	 */
5300 	val = max(val, PAGE_SIZE);
5301 	fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
5302 
5303 	return 0;
5304 }
5305 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5306 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5307 
5308 static int __init fault_around_debugfs(void)
5309 {
5310 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
5311 				   &fault_around_bytes_fops);
5312 	return 0;
5313 }
5314 late_initcall(fault_around_debugfs);
5315 #endif
5316 
5317 /*
5318  * do_fault_around() tries to map few pages around the fault address. The hope
5319  * is that the pages will be needed soon and this will lower the number of
5320  * faults to handle.
5321  *
5322  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5323  * not ready to be mapped: not up-to-date, locked, etc.
5324  *
5325  * This function doesn't cross VMA or page table boundaries, in order to call
5326  * map_pages() and acquire a PTE lock only once.
5327  *
5328  * fault_around_pages defines how many pages we'll try to map.
5329  * do_fault_around() expects it to be set to a power of two less than or equal
5330  * to PTRS_PER_PTE.
5331  *
5332  * The virtual address of the area that we map is naturally aligned to
5333  * fault_around_pages * PAGE_SIZE rounded down to the machine page size
5334  * (and therefore to page order).  This way it's easier to guarantee
5335  * that we don't cross page table boundaries.
5336  */
5337 static vm_fault_t do_fault_around(struct vm_fault *vmf)
5338 {
5339 	pgoff_t nr_pages = READ_ONCE(fault_around_pages);
5340 	pgoff_t pte_off = pte_index(vmf->address);
5341 	/* The page offset of vmf->address within the VMA. */
5342 	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5343 	pgoff_t from_pte, to_pte;
5344 	vm_fault_t ret;
5345 
5346 	/* The PTE offset of the start address, clamped to the VMA. */
5347 	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
5348 		       pte_off - min(pte_off, vma_off));
5349 
5350 	/* The PTE offset of the end address, clamped to the VMA and PTE. */
5351 	to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
5352 		      pte_off + vma_pages(vmf->vma) - vma_off) - 1;
5353 
5354 	if (pmd_none(*vmf->pmd)) {
5355 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
5356 		if (!vmf->prealloc_pte)
5357 			return VM_FAULT_OOM;
5358 	}
5359 
5360 	rcu_read_lock();
5361 	ret = vmf->vma->vm_ops->map_pages(vmf,
5362 			vmf->pgoff + from_pte - pte_off,
5363 			vmf->pgoff + to_pte - pte_off);
5364 	rcu_read_unlock();
5365 
5366 	return ret;
5367 }
5368 
5369 /* Return true if we should do read fault-around, false otherwise */
5370 static inline bool should_fault_around(struct vm_fault *vmf)
5371 {
5372 	/* No ->map_pages?  No way to fault around... */
5373 	if (!vmf->vma->vm_ops->map_pages)
5374 		return false;
5375 
5376 	if (uffd_disable_fault_around(vmf->vma))
5377 		return false;
5378 
5379 	/* A single page implies no faulting 'around' at all. */
5380 	return fault_around_pages > 1;
5381 }
5382 
5383 static vm_fault_t do_read_fault(struct vm_fault *vmf)
5384 {
5385 	vm_fault_t ret = 0;
5386 	struct folio *folio;
5387 
5388 	/*
5389 	 * Let's call ->map_pages() first and use ->fault() as fallback
5390 	 * if page by the offset is not ready to be mapped (cold cache or
5391 	 * something).
5392 	 */
5393 	if (should_fault_around(vmf)) {
5394 		ret = do_fault_around(vmf);
5395 		if (ret)
5396 			return ret;
5397 	}
5398 
5399 	ret = vmf_can_call_fault(vmf);
5400 	if (ret)
5401 		return ret;
5402 
5403 	ret = __do_fault(vmf);
5404 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5405 		return ret;
5406 
5407 	ret |= finish_fault(vmf);
5408 	folio = page_folio(vmf->page);
5409 	folio_unlock(folio);
5410 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5411 		folio_put(folio);
5412 	return ret;
5413 }
5414 
5415 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
5416 {
5417 	struct vm_area_struct *vma = vmf->vma;
5418 	struct folio *folio;
5419 	vm_fault_t ret;
5420 
5421 	ret = vmf_can_call_fault(vmf);
5422 	if (!ret)
5423 		ret = vmf_anon_prepare(vmf);
5424 	if (ret)
5425 		return ret;
5426 
5427 	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
5428 	if (!folio)
5429 		return VM_FAULT_OOM;
5430 
5431 	vmf->cow_page = &folio->page;
5432 
5433 	ret = __do_fault(vmf);
5434 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5435 		goto uncharge_out;
5436 	if (ret & VM_FAULT_DONE_COW)
5437 		return ret;
5438 
5439 	if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
5440 		ret = VM_FAULT_HWPOISON;
5441 		goto unlock;
5442 	}
5443 	__folio_mark_uptodate(folio);
5444 
5445 	ret |= finish_fault(vmf);
5446 unlock:
5447 	unlock_page(vmf->page);
5448 	put_page(vmf->page);
5449 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5450 		goto uncharge_out;
5451 	return ret;
5452 uncharge_out:
5453 	folio_put(folio);
5454 	return ret;
5455 }
5456 
5457 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5458 {
5459 	struct vm_area_struct *vma = vmf->vma;
5460 	vm_fault_t ret, tmp;
5461 	struct folio *folio;
5462 
5463 	ret = vmf_can_call_fault(vmf);
5464 	if (ret)
5465 		return ret;
5466 
5467 	ret = __do_fault(vmf);
5468 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5469 		return ret;
5470 
5471 	folio = page_folio(vmf->page);
5472 
5473 	/*
5474 	 * Check if the backing address space wants to know that the page is
5475 	 * about to become writable
5476 	 */
5477 	if (vma->vm_ops->page_mkwrite) {
5478 		folio_unlock(folio);
5479 		tmp = do_page_mkwrite(vmf, folio);
5480 		if (unlikely(!tmp ||
5481 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5482 			folio_put(folio);
5483 			return tmp;
5484 		}
5485 	}
5486 
5487 	ret |= finish_fault(vmf);
5488 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5489 					VM_FAULT_RETRY))) {
5490 		folio_unlock(folio);
5491 		folio_put(folio);
5492 		return ret;
5493 	}
5494 
5495 	ret |= fault_dirty_shared_page(vmf);
5496 	return ret;
5497 }
5498 
5499 /*
5500  * We enter with non-exclusive mmap_lock (to exclude vma changes,
5501  * but allow concurrent faults).
5502  * The mmap_lock may have been released depending on flags and our
5503  * return value.  See filemap_fault() and __folio_lock_or_retry().
5504  * If mmap_lock is released, vma may become invalid (for example
5505  * by other thread calling munmap()).
5506  */
5507 static vm_fault_t do_fault(struct vm_fault *vmf)
5508 {
5509 	struct vm_area_struct *vma = vmf->vma;
5510 	struct mm_struct *vm_mm = vma->vm_mm;
5511 	vm_fault_t ret;
5512 
5513 	/*
5514 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
5515 	 */
5516 	if (!vma->vm_ops->fault) {
5517 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
5518 					       vmf->address, &vmf->ptl);
5519 		if (unlikely(!vmf->pte))
5520 			ret = VM_FAULT_SIGBUS;
5521 		else {
5522 			/*
5523 			 * Make sure this is not a temporary clearing of pte
5524 			 * by holding ptl and checking again. A R/M/W update
5525 			 * of pte involves: take ptl, clearing the pte so that
5526 			 * we don't have concurrent modification by hardware
5527 			 * followed by an update.
5528 			 */
5529 			if (unlikely(pte_none(ptep_get(vmf->pte))))
5530 				ret = VM_FAULT_SIGBUS;
5531 			else
5532 				ret = VM_FAULT_NOPAGE;
5533 
5534 			pte_unmap_unlock(vmf->pte, vmf->ptl);
5535 		}
5536 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
5537 		ret = do_read_fault(vmf);
5538 	else if (!(vma->vm_flags & VM_SHARED))
5539 		ret = do_cow_fault(vmf);
5540 	else
5541 		ret = do_shared_fault(vmf);
5542 
5543 	/* preallocated pagetable is unused: free it */
5544 	if (vmf->prealloc_pte) {
5545 		pte_free(vm_mm, vmf->prealloc_pte);
5546 		vmf->prealloc_pte = NULL;
5547 	}
5548 	return ret;
5549 }
5550 
5551 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
5552 		      unsigned long addr, int *flags,
5553 		      bool writable, int *last_cpupid)
5554 {
5555 	struct vm_area_struct *vma = vmf->vma;
5556 
5557 	/*
5558 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5559 	 * much anyway since they can be in shared cache state. This misses
5560 	 * the case where a mapping is writable but the process never writes
5561 	 * to it but pte_write gets cleared during protection updates and
5562 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
5563 	 * background writeback, dirty balancing and application behaviour.
5564 	 */
5565 	if (!writable)
5566 		*flags |= TNF_NO_GROUP;
5567 
5568 	/*
5569 	 * Flag if the folio is shared between multiple address spaces. This
5570 	 * is later used when determining whether to group tasks together
5571 	 */
5572 	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
5573 		*flags |= TNF_SHARED;
5574 	/*
5575 	 * For memory tiering mode, cpupid of slow memory page is used
5576 	 * to record page access time.  So use default value.
5577 	 */
5578 	if (folio_use_access_time(folio))
5579 		*last_cpupid = (-1 & LAST_CPUPID_MASK);
5580 	else
5581 		*last_cpupid = folio_last_cpupid(folio);
5582 
5583 	/* Record the current PID acceesing VMA */
5584 	vma_set_access_pid_bit(vma);
5585 
5586 	count_vm_numa_event(NUMA_HINT_FAULTS);
5587 #ifdef CONFIG_NUMA_BALANCING
5588 	count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
5589 #endif
5590 	if (folio_nid(folio) == numa_node_id()) {
5591 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5592 		*flags |= TNF_FAULT_LOCAL;
5593 	}
5594 
5595 	return mpol_misplaced(folio, vmf, addr);
5596 }
5597 
5598 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5599 					unsigned long fault_addr, pte_t *fault_pte,
5600 					bool writable)
5601 {
5602 	pte_t pte, old_pte;
5603 
5604 	old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
5605 	pte = pte_modify(old_pte, vma->vm_page_prot);
5606 	pte = pte_mkyoung(pte);
5607 	if (writable)
5608 		pte = pte_mkwrite(pte, vma);
5609 	ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
5610 	update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
5611 }
5612 
5613 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5614 				       struct folio *folio, pte_t fault_pte,
5615 				       bool ignore_writable, bool pte_write_upgrade)
5616 {
5617 	int nr = pte_pfn(fault_pte) - folio_pfn(folio);
5618 	unsigned long start, end, addr = vmf->address;
5619 	unsigned long addr_start = addr - (nr << PAGE_SHIFT);
5620 	unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
5621 	pte_t *start_ptep;
5622 
5623 	/* Stay within the VMA and within the page table. */
5624 	start = max3(addr_start, pt_start, vma->vm_start);
5625 	end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
5626 		   vma->vm_end);
5627 	start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
5628 
5629 	/* Restore all PTEs' mapping of the large folio */
5630 	for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
5631 		pte_t ptent = ptep_get(start_ptep);
5632 		bool writable = false;
5633 
5634 		if (!pte_present(ptent) || !pte_protnone(ptent))
5635 			continue;
5636 
5637 		if (pfn_folio(pte_pfn(ptent)) != folio)
5638 			continue;
5639 
5640 		if (!ignore_writable) {
5641 			ptent = pte_modify(ptent, vma->vm_page_prot);
5642 			writable = pte_write(ptent);
5643 			if (!writable && pte_write_upgrade &&
5644 			    can_change_pte_writable(vma, addr, ptent))
5645 				writable = true;
5646 		}
5647 
5648 		numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
5649 	}
5650 }
5651 
5652 static vm_fault_t do_numa_page(struct vm_fault *vmf)
5653 {
5654 	struct vm_area_struct *vma = vmf->vma;
5655 	struct folio *folio = NULL;
5656 	int nid = NUMA_NO_NODE;
5657 	bool writable = false, ignore_writable = false;
5658 	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
5659 	int last_cpupid;
5660 	int target_nid;
5661 	pte_t pte, old_pte;
5662 	int flags = 0, nr_pages;
5663 
5664 	/*
5665 	 * The pte cannot be used safely until we verify, while holding the page
5666 	 * table lock, that its contents have not changed during fault handling.
5667 	 */
5668 	spin_lock(vmf->ptl);
5669 	/* Read the live PTE from the page tables: */
5670 	old_pte = ptep_get(vmf->pte);
5671 
5672 	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
5673 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5674 		return 0;
5675 	}
5676 
5677 	pte = pte_modify(old_pte, vma->vm_page_prot);
5678 
5679 	/*
5680 	 * Detect now whether the PTE could be writable; this information
5681 	 * is only valid while holding the PT lock.
5682 	 */
5683 	writable = pte_write(pte);
5684 	if (!writable && pte_write_upgrade &&
5685 	    can_change_pte_writable(vma, vmf->address, pte))
5686 		writable = true;
5687 
5688 	folio = vm_normal_folio(vma, vmf->address, pte);
5689 	if (!folio || folio_is_zone_device(folio))
5690 		goto out_map;
5691 
5692 	nid = folio_nid(folio);
5693 	nr_pages = folio_nr_pages(folio);
5694 
5695 	target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
5696 					writable, &last_cpupid);
5697 	if (target_nid == NUMA_NO_NODE)
5698 		goto out_map;
5699 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
5700 		flags |= TNF_MIGRATE_FAIL;
5701 		goto out_map;
5702 	}
5703 	/* The folio is isolated and isolation code holds a folio reference. */
5704 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5705 	writable = false;
5706 	ignore_writable = true;
5707 
5708 	/* Migrate to the requested node */
5709 	if (!migrate_misplaced_folio(folio, target_nid)) {
5710 		nid = target_nid;
5711 		flags |= TNF_MIGRATED;
5712 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
5713 		return 0;
5714 	}
5715 
5716 	flags |= TNF_MIGRATE_FAIL;
5717 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5718 				       vmf->address, &vmf->ptl);
5719 	if (unlikely(!vmf->pte))
5720 		return 0;
5721 	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
5722 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5723 		return 0;
5724 	}
5725 out_map:
5726 	/*
5727 	 * Make it present again, depending on how arch implements
5728 	 * non-accessible ptes, some can allow access by kernel mode.
5729 	 */
5730 	if (folio && folio_test_large(folio))
5731 		numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
5732 					   pte_write_upgrade);
5733 	else
5734 		numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
5735 					    writable);
5736 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5737 
5738 	if (nid != NUMA_NO_NODE)
5739 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
5740 	return 0;
5741 }
5742 
5743 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
5744 {
5745 	struct vm_area_struct *vma = vmf->vma;
5746 
5747 	if (vma_is_anonymous(vma))
5748 		return do_huge_pmd_anonymous_page(vmf);
5749 	/*
5750 	 * Currently we just emit PAGE_SIZE for our fault events, so don't allow
5751 	 * a huge fault if we have a pre content watch on this file.  This would
5752 	 * be trivial to support, but there would need to be tests to ensure
5753 	 * this works properly and those don't exist currently.
5754 	 */
5755 	if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5756 		return VM_FAULT_FALLBACK;
5757 	if (vma->vm_ops->huge_fault)
5758 		return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5759 	return VM_FAULT_FALLBACK;
5760 }
5761 
5762 /* `inline' is required to avoid gcc 4.1.2 build error */
5763 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
5764 {
5765 	struct vm_area_struct *vma = vmf->vma;
5766 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5767 	vm_fault_t ret;
5768 
5769 	if (vma_is_anonymous(vma)) {
5770 		if (likely(!unshare) &&
5771 		    userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
5772 			if (userfaultfd_wp_async(vmf->vma))
5773 				goto split;
5774 			return handle_userfault(vmf, VM_UFFD_WP);
5775 		}
5776 		return do_huge_pmd_wp_page(vmf);
5777 	}
5778 
5779 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5780 		/* See comment in create_huge_pmd. */
5781 		if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5782 			goto split;
5783 		if (vma->vm_ops->huge_fault) {
5784 			ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5785 			if (!(ret & VM_FAULT_FALLBACK))
5786 				return ret;
5787 		}
5788 	}
5789 
5790 split:
5791 	/* COW or write-notify handled on pte level: split pmd. */
5792 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5793 
5794 	return VM_FAULT_FALLBACK;
5795 }
5796 
5797 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
5798 {
5799 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
5800 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5801 	struct vm_area_struct *vma = vmf->vma;
5802 	/* No support for anonymous transparent PUD pages yet */
5803 	if (vma_is_anonymous(vma))
5804 		return VM_FAULT_FALLBACK;
5805 	/* See comment in create_huge_pmd. */
5806 	if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5807 		return VM_FAULT_FALLBACK;
5808 	if (vma->vm_ops->huge_fault)
5809 		return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5810 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5811 	return VM_FAULT_FALLBACK;
5812 }
5813 
5814 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5815 {
5816 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
5817 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5818 	struct vm_area_struct *vma = vmf->vma;
5819 	vm_fault_t ret;
5820 
5821 	/* No support for anonymous transparent PUD pages yet */
5822 	if (vma_is_anonymous(vma))
5823 		goto split;
5824 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5825 		/* See comment in create_huge_pmd. */
5826 		if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
5827 			goto split;
5828 		if (vma->vm_ops->huge_fault) {
5829 			ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5830 			if (!(ret & VM_FAULT_FALLBACK))
5831 				return ret;
5832 		}
5833 	}
5834 split:
5835 	/* COW or write-notify not handled on PUD level: split pud.*/
5836 	__split_huge_pud(vma, vmf->pud, vmf->address);
5837 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5838 	return VM_FAULT_FALLBACK;
5839 }
5840 
5841 /*
5842  * These routines also need to handle stuff like marking pages dirty
5843  * and/or accessed for architectures that don't do it in hardware (most
5844  * RISC architectures).  The early dirtying is also good on the i386.
5845  *
5846  * There is also a hook called "update_mmu_cache()" that architectures
5847  * with external mmu caches can use to update those (ie the Sparc or
5848  * PowerPC hashed page tables that act as extended TLBs).
5849  *
5850  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
5851  * concurrent faults).
5852  *
5853  * The mmap_lock may have been released depending on flags and our return value.
5854  * See filemap_fault() and __folio_lock_or_retry().
5855  */
5856 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
5857 {
5858 	pte_t entry;
5859 
5860 	if (unlikely(pmd_none(*vmf->pmd))) {
5861 		/*
5862 		 * Leave __pte_alloc() until later: because vm_ops->fault may
5863 		 * want to allocate huge page, and if we expose page table
5864 		 * for an instant, it will be difficult to retract from
5865 		 * concurrent faults and from rmap lookups.
5866 		 */
5867 		vmf->pte = NULL;
5868 		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
5869 	} else {
5870 		pmd_t dummy_pmdval;
5871 
5872 		/*
5873 		 * A regular pmd is established and it can't morph into a huge
5874 		 * pmd by anon khugepaged, since that takes mmap_lock in write
5875 		 * mode; but shmem or file collapse to THP could still morph
5876 		 * it into a huge pmd: just retry later if so.
5877 		 *
5878 		 * Use the maywrite version to indicate that vmf->pte may be
5879 		 * modified, but since we will use pte_same() to detect the
5880 		 * change of the !pte_none() entry, there is no need to recheck
5881 		 * the pmdval. Here we chooes to pass a dummy variable instead
5882 		 * of NULL, which helps new user think about why this place is
5883 		 * special.
5884 		 */
5885 		vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
5886 						    vmf->address, &dummy_pmdval,
5887 						    &vmf->ptl);
5888 		if (unlikely(!vmf->pte))
5889 			return 0;
5890 		vmf->orig_pte = ptep_get_lockless(vmf->pte);
5891 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
5892 
5893 		if (pte_none(vmf->orig_pte)) {
5894 			pte_unmap(vmf->pte);
5895 			vmf->pte = NULL;
5896 		}
5897 	}
5898 
5899 	if (!vmf->pte)
5900 		return do_pte_missing(vmf);
5901 
5902 	if (!pte_present(vmf->orig_pte))
5903 		return do_swap_page(vmf);
5904 
5905 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5906 		return do_numa_page(vmf);
5907 
5908 	spin_lock(vmf->ptl);
5909 	entry = vmf->orig_pte;
5910 	if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
5911 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
5912 		goto unlock;
5913 	}
5914 	if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5915 		if (!pte_write(entry))
5916 			return do_wp_page(vmf);
5917 		else if (likely(vmf->flags & FAULT_FLAG_WRITE))
5918 			entry = pte_mkdirty(entry);
5919 	}
5920 	entry = pte_mkyoung(entry);
5921 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5922 				vmf->flags & FAULT_FLAG_WRITE)) {
5923 		update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5924 				vmf->pte, 1);
5925 	} else {
5926 		/* Skip spurious TLB flush for retried page fault */
5927 		if (vmf->flags & FAULT_FLAG_TRIED)
5928 			goto unlock;
5929 		/*
5930 		 * This is needed only for protection faults but the arch code
5931 		 * is not yet telling us if this is a protection fault or not.
5932 		 * This still avoids useless tlb flushes for .text page faults
5933 		 * with threads.
5934 		 */
5935 		if (vmf->flags & FAULT_FLAG_WRITE)
5936 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5937 						     vmf->pte);
5938 	}
5939 unlock:
5940 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5941 	return 0;
5942 }
5943 
5944 /*
5945  * On entry, we hold either the VMA lock or the mmap_lock
5946  * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in
5947  * the result, the mmap_lock is not held on exit.  See filemap_fault()
5948  * and __folio_lock_or_retry().
5949  */
5950 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5951 		unsigned long address, unsigned int flags)
5952 {
5953 	struct vm_fault vmf = {
5954 		.vma = vma,
5955 		.address = address & PAGE_MASK,
5956 		.real_address = address,
5957 		.flags = flags,
5958 		.pgoff = linear_page_index(vma, address),
5959 		.gfp_mask = __get_fault_gfp_mask(vma),
5960 	};
5961 	struct mm_struct *mm = vma->vm_mm;
5962 	unsigned long vm_flags = vma->vm_flags;
5963 	pgd_t *pgd;
5964 	p4d_t *p4d;
5965 	vm_fault_t ret;
5966 
5967 	pgd = pgd_offset(mm, address);
5968 	p4d = p4d_alloc(mm, pgd, address);
5969 	if (!p4d)
5970 		return VM_FAULT_OOM;
5971 
5972 	vmf.pud = pud_alloc(mm, p4d, address);
5973 	if (!vmf.pud)
5974 		return VM_FAULT_OOM;
5975 retry_pud:
5976 	if (pud_none(*vmf.pud) &&
5977 	    thp_vma_allowable_order(vma, vm_flags,
5978 				TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
5979 		ret = create_huge_pud(&vmf);
5980 		if (!(ret & VM_FAULT_FALLBACK))
5981 			return ret;
5982 	} else {
5983 		pud_t orig_pud = *vmf.pud;
5984 
5985 		barrier();
5986 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5987 
5988 			/*
5989 			 * TODO once we support anonymous PUDs: NUMA case and
5990 			 * FAULT_FLAG_UNSHARE handling.
5991 			 */
5992 			if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
5993 				ret = wp_huge_pud(&vmf, orig_pud);
5994 				if (!(ret & VM_FAULT_FALLBACK))
5995 					return ret;
5996 			} else {
5997 				huge_pud_set_accessed(&vmf, orig_pud);
5998 				return 0;
5999 			}
6000 		}
6001 	}
6002 
6003 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
6004 	if (!vmf.pmd)
6005 		return VM_FAULT_OOM;
6006 
6007 	/* Huge pud page fault raced with pmd_alloc? */
6008 	if (pud_trans_unstable(vmf.pud))
6009 		goto retry_pud;
6010 
6011 	if (pmd_none(*vmf.pmd) &&
6012 	    thp_vma_allowable_order(vma, vm_flags,
6013 				TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
6014 		ret = create_huge_pmd(&vmf);
6015 		if (!(ret & VM_FAULT_FALLBACK))
6016 			return ret;
6017 	} else {
6018 		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
6019 
6020 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
6021 			VM_BUG_ON(thp_migration_supported() &&
6022 					  !is_pmd_migration_entry(vmf.orig_pmd));
6023 			if (is_pmd_migration_entry(vmf.orig_pmd))
6024 				pmd_migration_entry_wait(mm, vmf.pmd);
6025 			return 0;
6026 		}
6027 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
6028 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
6029 				return do_huge_pmd_numa_page(&vmf);
6030 
6031 			if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6032 			    !pmd_write(vmf.orig_pmd)) {
6033 				ret = wp_huge_pmd(&vmf);
6034 				if (!(ret & VM_FAULT_FALLBACK))
6035 					return ret;
6036 			} else {
6037 				huge_pmd_set_accessed(&vmf);
6038 				return 0;
6039 			}
6040 		}
6041 	}
6042 
6043 	return handle_pte_fault(&vmf);
6044 }
6045 
6046 /**
6047  * mm_account_fault - Do page fault accounting
6048  * @mm: mm from which memcg should be extracted. It can be NULL.
6049  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
6050  *        of perf event counters, but we'll still do the per-task accounting to
6051  *        the task who triggered this page fault.
6052  * @address: the faulted address.
6053  * @flags: the fault flags.
6054  * @ret: the fault retcode.
6055  *
6056  * This will take care of most of the page fault accounting.  Meanwhile, it
6057  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
6058  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
6059  * still be in per-arch page fault handlers at the entry of page fault.
6060  */
6061 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
6062 				    unsigned long address, unsigned int flags,
6063 				    vm_fault_t ret)
6064 {
6065 	bool major;
6066 
6067 	/* Incomplete faults will be accounted upon completion. */
6068 	if (ret & VM_FAULT_RETRY)
6069 		return;
6070 
6071 	/*
6072 	 * To preserve the behavior of older kernels, PGFAULT counters record
6073 	 * both successful and failed faults, as opposed to perf counters,
6074 	 * which ignore failed cases.
6075 	 */
6076 	count_vm_event(PGFAULT);
6077 	count_memcg_event_mm(mm, PGFAULT);
6078 
6079 	/*
6080 	 * Do not account for unsuccessful faults (e.g. when the address wasn't
6081 	 * valid).  That includes arch_vma_access_permitted() failing before
6082 	 * reaching here. So this is not a "this many hardware page faults"
6083 	 * counter.  We should use the hw profiling for that.
6084 	 */
6085 	if (ret & VM_FAULT_ERROR)
6086 		return;
6087 
6088 	/*
6089 	 * We define the fault as a major fault when the final successful fault
6090 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
6091 	 * handle it immediately previously).
6092 	 */
6093 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
6094 
6095 	if (major)
6096 		current->maj_flt++;
6097 	else
6098 		current->min_flt++;
6099 
6100 	/*
6101 	 * If the fault is done for GUP, regs will be NULL.  We only do the
6102 	 * accounting for the per thread fault counters who triggered the
6103 	 * fault, and we skip the perf event updates.
6104 	 */
6105 	if (!regs)
6106 		return;
6107 
6108 	if (major)
6109 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
6110 	else
6111 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
6112 }
6113 
6114 #ifdef CONFIG_LRU_GEN
6115 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6116 {
6117 	/* the LRU algorithm only applies to accesses with recency */
6118 	current->in_lru_fault = vma_has_recency(vma);
6119 }
6120 
6121 static void lru_gen_exit_fault(void)
6122 {
6123 	current->in_lru_fault = false;
6124 }
6125 #else
6126 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6127 {
6128 }
6129 
6130 static void lru_gen_exit_fault(void)
6131 {
6132 }
6133 #endif /* CONFIG_LRU_GEN */
6134 
6135 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
6136 				       unsigned int *flags)
6137 {
6138 	if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
6139 		if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
6140 			return VM_FAULT_SIGSEGV;
6141 		/*
6142 		 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
6143 		 * just treat it like an ordinary read-fault otherwise.
6144 		 */
6145 		if (!is_cow_mapping(vma->vm_flags))
6146 			*flags &= ~FAULT_FLAG_UNSHARE;
6147 	} else if (*flags & FAULT_FLAG_WRITE) {
6148 		/* Write faults on read-only mappings are impossible ... */
6149 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
6150 			return VM_FAULT_SIGSEGV;
6151 		/* ... and FOLL_FORCE only applies to COW mappings. */
6152 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
6153 				 !is_cow_mapping(vma->vm_flags)))
6154 			return VM_FAULT_SIGSEGV;
6155 	}
6156 #ifdef CONFIG_PER_VMA_LOCK
6157 	/*
6158 	 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
6159 	 * the assumption that lock is dropped on VM_FAULT_RETRY.
6160 	 */
6161 	if (WARN_ON_ONCE((*flags &
6162 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
6163 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
6164 		return VM_FAULT_SIGSEGV;
6165 #endif
6166 
6167 	return 0;
6168 }
6169 
6170 /*
6171  * By the time we get here, we already hold either the VMA lock or the
6172  * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
6173  *
6174  * The mmap_lock may have been released depending on flags and our
6175  * return value.  See filemap_fault() and __folio_lock_or_retry().
6176  */
6177 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6178 			   unsigned int flags, struct pt_regs *regs)
6179 {
6180 	/* If the fault handler drops the mmap_lock, vma may be freed */
6181 	struct mm_struct *mm = vma->vm_mm;
6182 	vm_fault_t ret;
6183 	bool is_droppable;
6184 
6185 	__set_current_state(TASK_RUNNING);
6186 
6187 	ret = sanitize_fault_flags(vma, &flags);
6188 	if (ret)
6189 		goto out;
6190 
6191 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6192 					    flags & FAULT_FLAG_INSTRUCTION,
6193 					    flags & FAULT_FLAG_REMOTE)) {
6194 		ret = VM_FAULT_SIGSEGV;
6195 		goto out;
6196 	}
6197 
6198 	is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
6199 
6200 	/*
6201 	 * Enable the memcg OOM handling for faults triggered in user
6202 	 * space.  Kernel faults are handled more gracefully.
6203 	 */
6204 	if (flags & FAULT_FLAG_USER)
6205 		mem_cgroup_enter_user_fault();
6206 
6207 	lru_gen_enter_fault(vma);
6208 
6209 	if (unlikely(is_vm_hugetlb_page(vma)))
6210 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6211 	else
6212 		ret = __handle_mm_fault(vma, address, flags);
6213 
6214 	/*
6215 	 * Warning: It is no longer safe to dereference vma-> after this point,
6216 	 * because mmap_lock might have been dropped by __handle_mm_fault(), so
6217 	 * vma might be destroyed from underneath us.
6218 	 */
6219 
6220 	lru_gen_exit_fault();
6221 
6222 	/* If the mapping is droppable, then errors due to OOM aren't fatal. */
6223 	if (is_droppable)
6224 		ret &= ~VM_FAULT_OOM;
6225 
6226 	if (flags & FAULT_FLAG_USER) {
6227 		mem_cgroup_exit_user_fault();
6228 		/*
6229 		 * The task may have entered a memcg OOM situation but
6230 		 * if the allocation error was handled gracefully (no
6231 		 * VM_FAULT_OOM), there is no need to kill anything.
6232 		 * Just clean up the OOM state peacefully.
6233 		 */
6234 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6235 			mem_cgroup_oom_synchronize(false);
6236 	}
6237 out:
6238 	mm_account_fault(mm, regs, address, flags, ret);
6239 
6240 	return ret;
6241 }
6242 EXPORT_SYMBOL_GPL(handle_mm_fault);
6243 
6244 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA
6245 #include <linux/extable.h>
6246 
6247 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6248 {
6249 	if (likely(mmap_read_trylock(mm)))
6250 		return true;
6251 
6252 	if (regs && !user_mode(regs)) {
6253 		unsigned long ip = exception_ip(regs);
6254 		if (!search_exception_tables(ip))
6255 			return false;
6256 	}
6257 
6258 	return !mmap_read_lock_killable(mm);
6259 }
6260 
6261 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
6262 {
6263 	/*
6264 	 * We don't have this operation yet.
6265 	 *
6266 	 * It should be easy enough to do: it's basically a
6267 	 *    atomic_long_try_cmpxchg_acquire()
6268 	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
6269 	 * it also needs the proper lockdep magic etc.
6270 	 */
6271 	return false;
6272 }
6273 
6274 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6275 {
6276 	mmap_read_unlock(mm);
6277 	if (regs && !user_mode(regs)) {
6278 		unsigned long ip = exception_ip(regs);
6279 		if (!search_exception_tables(ip))
6280 			return false;
6281 	}
6282 	return !mmap_write_lock_killable(mm);
6283 }
6284 
6285 /*
6286  * Helper for page fault handling.
6287  *
6288  * This is kind of equivalent to "mmap_read_lock()" followed
6289  * by "find_extend_vma()", except it's a lot more careful about
6290  * the locking (and will drop the lock on failure).
6291  *
6292  * For example, if we have a kernel bug that causes a page
6293  * fault, we don't want to just use mmap_read_lock() to get
6294  * the mm lock, because that would deadlock if the bug were
6295  * to happen while we're holding the mm lock for writing.
6296  *
6297  * So this checks the exception tables on kernel faults in
6298  * order to only do this all for instructions that are actually
6299  * expected to fault.
6300  *
6301  * We can also actually take the mm lock for writing if we
6302  * need to extend the vma, which helps the VM layer a lot.
6303  */
6304 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
6305 			unsigned long addr, struct pt_regs *regs)
6306 {
6307 	struct vm_area_struct *vma;
6308 
6309 	if (!get_mmap_lock_carefully(mm, regs))
6310 		return NULL;
6311 
6312 	vma = find_vma(mm, addr);
6313 	if (likely(vma && (vma->vm_start <= addr)))
6314 		return vma;
6315 
6316 	/*
6317 	 * Well, dang. We might still be successful, but only
6318 	 * if we can extend a vma to do so.
6319 	 */
6320 	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
6321 		mmap_read_unlock(mm);
6322 		return NULL;
6323 	}
6324 
6325 	/*
6326 	 * We can try to upgrade the mmap lock atomically,
6327 	 * in which case we can continue to use the vma
6328 	 * we already looked up.
6329 	 *
6330 	 * Otherwise we'll have to drop the mmap lock and
6331 	 * re-take it, and also look up the vma again,
6332 	 * re-checking it.
6333 	 */
6334 	if (!mmap_upgrade_trylock(mm)) {
6335 		if (!upgrade_mmap_lock_carefully(mm, regs))
6336 			return NULL;
6337 
6338 		vma = find_vma(mm, addr);
6339 		if (!vma)
6340 			goto fail;
6341 		if (vma->vm_start <= addr)
6342 			goto success;
6343 		if (!(vma->vm_flags & VM_GROWSDOWN))
6344 			goto fail;
6345 	}
6346 
6347 	if (expand_stack_locked(vma, addr))
6348 		goto fail;
6349 
6350 success:
6351 	mmap_write_downgrade(mm);
6352 	return vma;
6353 
6354 fail:
6355 	mmap_write_unlock(mm);
6356 	return NULL;
6357 }
6358 #endif
6359 
6360 #ifdef CONFIG_PER_VMA_LOCK
6361 /*
6362  * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
6363  * stable and not isolated. If the VMA is not found or is being modified the
6364  * function returns NULL.
6365  */
6366 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
6367 					  unsigned long address)
6368 {
6369 	MA_STATE(mas, &mm->mm_mt, address, address);
6370 	struct vm_area_struct *vma;
6371 
6372 	rcu_read_lock();
6373 retry:
6374 	vma = mas_walk(&mas);
6375 	if (!vma)
6376 		goto inval;
6377 
6378 	if (!vma_start_read(vma))
6379 		goto inval;
6380 
6381 	/* Check if the VMA got isolated after we found it */
6382 	if (vma->detached) {
6383 		vma_end_read(vma);
6384 		count_vm_vma_lock_event(VMA_LOCK_MISS);
6385 		/* The area was replaced with another one */
6386 		goto retry;
6387 	}
6388 	/*
6389 	 * At this point, we have a stable reference to a VMA: The VMA is
6390 	 * locked and we know it hasn't already been isolated.
6391 	 * From here on, we can access the VMA without worrying about which
6392 	 * fields are accessible for RCU readers.
6393 	 */
6394 
6395 	/* Check since vm_start/vm_end might change before we lock the VMA */
6396 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6397 		goto inval_end_read;
6398 
6399 	rcu_read_unlock();
6400 	return vma;
6401 
6402 inval_end_read:
6403 	vma_end_read(vma);
6404 inval:
6405 	rcu_read_unlock();
6406 	count_vm_vma_lock_event(VMA_LOCK_ABORT);
6407 	return NULL;
6408 }
6409 #endif /* CONFIG_PER_VMA_LOCK */
6410 
6411 #ifndef __PAGETABLE_P4D_FOLDED
6412 /*
6413  * Allocate p4d page table.
6414  * We've already handled the fast-path in-line.
6415  */
6416 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6417 {
6418 	p4d_t *new = p4d_alloc_one(mm, address);
6419 	if (!new)
6420 		return -ENOMEM;
6421 
6422 	spin_lock(&mm->page_table_lock);
6423 	if (pgd_present(*pgd)) {	/* Another has populated it */
6424 		p4d_free(mm, new);
6425 	} else {
6426 		smp_wmb(); /* See comment in pmd_install() */
6427 		pgd_populate(mm, pgd, new);
6428 	}
6429 	spin_unlock(&mm->page_table_lock);
6430 	return 0;
6431 }
6432 #endif /* __PAGETABLE_P4D_FOLDED */
6433 
6434 #ifndef __PAGETABLE_PUD_FOLDED
6435 /*
6436  * Allocate page upper directory.
6437  * We've already handled the fast-path in-line.
6438  */
6439 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6440 {
6441 	pud_t *new = pud_alloc_one(mm, address);
6442 	if (!new)
6443 		return -ENOMEM;
6444 
6445 	spin_lock(&mm->page_table_lock);
6446 	if (!p4d_present(*p4d)) {
6447 		mm_inc_nr_puds(mm);
6448 		smp_wmb(); /* See comment in pmd_install() */
6449 		p4d_populate(mm, p4d, new);
6450 	} else	/* Another has populated it */
6451 		pud_free(mm, new);
6452 	spin_unlock(&mm->page_table_lock);
6453 	return 0;
6454 }
6455 #endif /* __PAGETABLE_PUD_FOLDED */
6456 
6457 #ifndef __PAGETABLE_PMD_FOLDED
6458 /*
6459  * Allocate page middle directory.
6460  * We've already handled the fast-path in-line.
6461  */
6462 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6463 {
6464 	spinlock_t *ptl;
6465 	pmd_t *new = pmd_alloc_one(mm, address);
6466 	if (!new)
6467 		return -ENOMEM;
6468 
6469 	ptl = pud_lock(mm, pud);
6470 	if (!pud_present(*pud)) {
6471 		mm_inc_nr_pmds(mm);
6472 		smp_wmb(); /* See comment in pmd_install() */
6473 		pud_populate(mm, pud, new);
6474 	} else {	/* Another has populated it */
6475 		pmd_free(mm, new);
6476 	}
6477 	spin_unlock(ptl);
6478 	return 0;
6479 }
6480 #endif /* __PAGETABLE_PMD_FOLDED */
6481 
6482 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
6483 				     spinlock_t *lock, pte_t *ptep,
6484 				     pgprot_t pgprot, unsigned long pfn_base,
6485 				     unsigned long addr_mask, bool writable,
6486 				     bool special)
6487 {
6488 	args->lock = lock;
6489 	args->ptep = ptep;
6490 	args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
6491 	args->pgprot = pgprot;
6492 	args->writable = writable;
6493 	args->special = special;
6494 }
6495 
6496 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
6497 {
6498 #ifdef CONFIG_LOCKDEP
6499 	struct file *file = vma->vm_file;
6500 	struct address_space *mapping = file ? file->f_mapping : NULL;
6501 
6502 	if (mapping)
6503 		lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
6504 			       lockdep_is_held(&vma->vm_mm->mmap_lock));
6505 	else
6506 		lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
6507 #endif
6508 }
6509 
6510 /**
6511  * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
6512  * @args: Pointer to struct @follow_pfnmap_args
6513  *
6514  * The caller needs to setup args->vma and args->address to point to the
6515  * virtual address as the target of such lookup.  On a successful return,
6516  * the results will be put into other output fields.
6517  *
6518  * After the caller finished using the fields, the caller must invoke
6519  * another follow_pfnmap_end() to proper releases the locks and resources
6520  * of such look up request.
6521  *
6522  * During the start() and end() calls, the results in @args will be valid
6523  * as proper locks will be held.  After the end() is called, all the fields
6524  * in @follow_pfnmap_args will be invalid to be further accessed.  Further
6525  * use of such information after end() may require proper synchronizations
6526  * by the caller with page table updates, otherwise it can create a
6527  * security bug.
6528  *
6529  * If the PTE maps a refcounted page, callers are responsible to protect
6530  * against invalidation with MMU notifiers; otherwise access to the PFN at
6531  * a later point in time can trigger use-after-free.
6532  *
6533  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
6534  * should be taken for read, and the mmap semaphore cannot be released
6535  * before the end() is invoked.
6536  *
6537  * This function must not be used to modify PTE content.
6538  *
6539  * Return: zero on success, negative otherwise.
6540  */
6541 int follow_pfnmap_start(struct follow_pfnmap_args *args)
6542 {
6543 	struct vm_area_struct *vma = args->vma;
6544 	unsigned long address = args->address;
6545 	struct mm_struct *mm = vma->vm_mm;
6546 	spinlock_t *lock;
6547 	pgd_t *pgdp;
6548 	p4d_t *p4dp, p4d;
6549 	pud_t *pudp, pud;
6550 	pmd_t *pmdp, pmd;
6551 	pte_t *ptep, pte;
6552 
6553 	pfnmap_lockdep_assert(vma);
6554 
6555 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6556 		goto out;
6557 
6558 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6559 		goto out;
6560 retry:
6561 	pgdp = pgd_offset(mm, address);
6562 	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
6563 		goto out;
6564 
6565 	p4dp = p4d_offset(pgdp, address);
6566 	p4d = READ_ONCE(*p4dp);
6567 	if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
6568 		goto out;
6569 
6570 	pudp = pud_offset(p4dp, address);
6571 	pud = READ_ONCE(*pudp);
6572 	if (pud_none(pud))
6573 		goto out;
6574 	if (pud_leaf(pud)) {
6575 		lock = pud_lock(mm, pudp);
6576 		if (!unlikely(pud_leaf(pud))) {
6577 			spin_unlock(lock);
6578 			goto retry;
6579 		}
6580 		pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
6581 				  pud_pfn(pud), PUD_MASK, pud_write(pud),
6582 				  pud_special(pud));
6583 		return 0;
6584 	}
6585 
6586 	pmdp = pmd_offset(pudp, address);
6587 	pmd = pmdp_get_lockless(pmdp);
6588 	if (pmd_leaf(pmd)) {
6589 		lock = pmd_lock(mm, pmdp);
6590 		if (!unlikely(pmd_leaf(pmd))) {
6591 			spin_unlock(lock);
6592 			goto retry;
6593 		}
6594 		pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
6595 				  pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
6596 				  pmd_special(pmd));
6597 		return 0;
6598 	}
6599 
6600 	ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
6601 	if (!ptep)
6602 		goto out;
6603 	pte = ptep_get(ptep);
6604 	if (!pte_present(pte))
6605 		goto unlock;
6606 	pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
6607 			  pte_pfn(pte), PAGE_MASK, pte_write(pte),
6608 			  pte_special(pte));
6609 	return 0;
6610 unlock:
6611 	pte_unmap_unlock(ptep, lock);
6612 out:
6613 	return -EINVAL;
6614 }
6615 EXPORT_SYMBOL_GPL(follow_pfnmap_start);
6616 
6617 /**
6618  * follow_pfnmap_end(): End a follow_pfnmap_start() process
6619  * @args: Pointer to struct @follow_pfnmap_args
6620  *
6621  * Must be used in pair of follow_pfnmap_start().  See the start() function
6622  * above for more information.
6623  */
6624 void follow_pfnmap_end(struct follow_pfnmap_args *args)
6625 {
6626 	if (args->lock)
6627 		spin_unlock(args->lock);
6628 	if (args->ptep)
6629 		pte_unmap(args->ptep);
6630 }
6631 EXPORT_SYMBOL_GPL(follow_pfnmap_end);
6632 
6633 #ifdef CONFIG_HAVE_IOREMAP_PROT
6634 /**
6635  * generic_access_phys - generic implementation for iomem mmap access
6636  * @vma: the vma to access
6637  * @addr: userspace address, not relative offset within @vma
6638  * @buf: buffer to read/write
6639  * @len: length of transfer
6640  * @write: set to FOLL_WRITE when writing, otherwise reading
6641  *
6642  * This is a generic implementation for &vm_operations_struct.access for an
6643  * iomem mapping. This callback is used by access_process_vm() when the @vma is
6644  * not page based.
6645  */
6646 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6647 			void *buf, int len, int write)
6648 {
6649 	resource_size_t phys_addr;
6650 	unsigned long prot = 0;
6651 	void __iomem *maddr;
6652 	int offset = offset_in_page(addr);
6653 	int ret = -EINVAL;
6654 	bool writable;
6655 	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
6656 
6657 retry:
6658 	if (follow_pfnmap_start(&args))
6659 		return -EINVAL;
6660 	prot = pgprot_val(args.pgprot);
6661 	phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
6662 	writable = args.writable;
6663 	follow_pfnmap_end(&args);
6664 
6665 	if ((write & FOLL_WRITE) && !writable)
6666 		return -EINVAL;
6667 
6668 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6669 	if (!maddr)
6670 		return -ENOMEM;
6671 
6672 	if (follow_pfnmap_start(&args))
6673 		goto out_unmap;
6674 
6675 	if ((prot != pgprot_val(args.pgprot)) ||
6676 	    (phys_addr != (args.pfn << PAGE_SHIFT)) ||
6677 	    (writable != args.writable)) {
6678 		follow_pfnmap_end(&args);
6679 		iounmap(maddr);
6680 		goto retry;
6681 	}
6682 
6683 	if (write)
6684 		memcpy_toio(maddr + offset, buf, len);
6685 	else
6686 		memcpy_fromio(buf, maddr + offset, len);
6687 	ret = len;
6688 	follow_pfnmap_end(&args);
6689 out_unmap:
6690 	iounmap(maddr);
6691 
6692 	return ret;
6693 }
6694 EXPORT_SYMBOL_GPL(generic_access_phys);
6695 #endif
6696 
6697 /*
6698  * Access another process' address space as given in mm.
6699  */
6700 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
6701 			      void *buf, int len, unsigned int gup_flags)
6702 {
6703 	void *old_buf = buf;
6704 	int write = gup_flags & FOLL_WRITE;
6705 
6706 	if (mmap_read_lock_killable(mm))
6707 		return 0;
6708 
6709 	/* Untag the address before looking up the VMA */
6710 	addr = untagged_addr_remote(mm, addr);
6711 
6712 	/* Avoid triggering the temporary warning in __get_user_pages */
6713 	if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
6714 		return 0;
6715 
6716 	/* ignore errors, just check how much was successfully transferred */
6717 	while (len) {
6718 		int bytes, offset;
6719 		void *maddr;
6720 		struct vm_area_struct *vma = NULL;
6721 		struct page *page = get_user_page_vma_remote(mm, addr,
6722 							     gup_flags, &vma);
6723 
6724 		if (IS_ERR(page)) {
6725 			/* We might need to expand the stack to access it */
6726 			vma = vma_lookup(mm, addr);
6727 			if (!vma) {
6728 				vma = expand_stack(mm, addr);
6729 
6730 				/* mmap_lock was dropped on failure */
6731 				if (!vma)
6732 					return buf - old_buf;
6733 
6734 				/* Try again if stack expansion worked */
6735 				continue;
6736 			}
6737 
6738 			/*
6739 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
6740 			 * we can access using slightly different code.
6741 			 */
6742 			bytes = 0;
6743 #ifdef CONFIG_HAVE_IOREMAP_PROT
6744 			if (vma->vm_ops && vma->vm_ops->access)
6745 				bytes = vma->vm_ops->access(vma, addr, buf,
6746 							    len, write);
6747 #endif
6748 			if (bytes <= 0)
6749 				break;
6750 		} else {
6751 			bytes = len;
6752 			offset = addr & (PAGE_SIZE-1);
6753 			if (bytes > PAGE_SIZE-offset)
6754 				bytes = PAGE_SIZE-offset;
6755 
6756 			maddr = kmap_local_page(page);
6757 			if (write) {
6758 				copy_to_user_page(vma, page, addr,
6759 						  maddr + offset, buf, bytes);
6760 				set_page_dirty_lock(page);
6761 			} else {
6762 				copy_from_user_page(vma, page, addr,
6763 						    buf, maddr + offset, bytes);
6764 			}
6765 			unmap_and_put_page(page, maddr);
6766 		}
6767 		len -= bytes;
6768 		buf += bytes;
6769 		addr += bytes;
6770 	}
6771 	mmap_read_unlock(mm);
6772 
6773 	return buf - old_buf;
6774 }
6775 
6776 /**
6777  * access_remote_vm - access another process' address space
6778  * @mm:		the mm_struct of the target address space
6779  * @addr:	start address to access
6780  * @buf:	source or destination buffer
6781  * @len:	number of bytes to transfer
6782  * @gup_flags:	flags modifying lookup behaviour
6783  *
6784  * The caller must hold a reference on @mm.
6785  *
6786  * Return: number of bytes copied from source to destination.
6787  */
6788 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6789 		void *buf, int len, unsigned int gup_flags)
6790 {
6791 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
6792 }
6793 
6794 /*
6795  * Access another process' address space.
6796  * Source/target buffer must be kernel space,
6797  * Do not walk the page table directly, use get_user_pages
6798  */
6799 int access_process_vm(struct task_struct *tsk, unsigned long addr,
6800 		void *buf, int len, unsigned int gup_flags)
6801 {
6802 	struct mm_struct *mm;
6803 	int ret;
6804 
6805 	mm = get_task_mm(tsk);
6806 	if (!mm)
6807 		return 0;
6808 
6809 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
6810 
6811 	mmput(mm);
6812 
6813 	return ret;
6814 }
6815 EXPORT_SYMBOL_GPL(access_process_vm);
6816 
6817 #ifdef CONFIG_BPF_SYSCALL
6818 /*
6819  * Copy a string from another process's address space as given in mm.
6820  * If there is any error return -EFAULT.
6821  */
6822 static int __copy_remote_vm_str(struct mm_struct *mm, unsigned long addr,
6823 				void *buf, int len, unsigned int gup_flags)
6824 {
6825 	void *old_buf = buf;
6826 	int err = 0;
6827 
6828 	*(char *)buf = '\0';
6829 
6830 	if (mmap_read_lock_killable(mm))
6831 		return -EFAULT;
6832 
6833 	addr = untagged_addr_remote(mm, addr);
6834 
6835 	/* Avoid triggering the temporary warning in __get_user_pages */
6836 	if (!vma_lookup(mm, addr)) {
6837 		err = -EFAULT;
6838 		goto out;
6839 	}
6840 
6841 	while (len) {
6842 		int bytes, offset, retval;
6843 		void *maddr;
6844 		struct page *page;
6845 		struct vm_area_struct *vma = NULL;
6846 
6847 		page = get_user_page_vma_remote(mm, addr, gup_flags, &vma);
6848 		if (IS_ERR(page)) {
6849 			/*
6850 			 * Treat as a total failure for now until we decide how
6851 			 * to handle the CONFIG_HAVE_IOREMAP_PROT case and
6852 			 * stack expansion.
6853 			 */
6854 			*(char *)buf = '\0';
6855 			err = -EFAULT;
6856 			goto out;
6857 		}
6858 
6859 		bytes = len;
6860 		offset = addr & (PAGE_SIZE - 1);
6861 		if (bytes > PAGE_SIZE - offset)
6862 			bytes = PAGE_SIZE - offset;
6863 
6864 		maddr = kmap_local_page(page);
6865 		retval = strscpy(buf, maddr + offset, bytes);
6866 		if (retval >= 0) {
6867 			/* Found the end of the string */
6868 			buf += retval;
6869 			unmap_and_put_page(page, maddr);
6870 			break;
6871 		}
6872 
6873 		buf += bytes - 1;
6874 		/*
6875 		 * Because strscpy always NUL terminates we need to
6876 		 * copy the last byte in the page if we are going to
6877 		 * load more pages
6878 		 */
6879 		if (bytes != len) {
6880 			addr += bytes - 1;
6881 			copy_from_user_page(vma, page, addr, buf, maddr + (PAGE_SIZE - 1), 1);
6882 			buf += 1;
6883 			addr += 1;
6884 		}
6885 		len -= bytes;
6886 
6887 		unmap_and_put_page(page, maddr);
6888 	}
6889 
6890 out:
6891 	mmap_read_unlock(mm);
6892 	if (err)
6893 		return err;
6894 	return buf - old_buf;
6895 }
6896 
6897 /**
6898  * copy_remote_vm_str - copy a string from another process's address space.
6899  * @tsk:	the task of the target address space
6900  * @addr:	start address to read from
6901  * @buf:	destination buffer
6902  * @len:	number of bytes to copy
6903  * @gup_flags:	flags modifying lookup behaviour
6904  *
6905  * The caller must hold a reference on @mm.
6906  *
6907  * Return: number of bytes copied from @addr (source) to @buf (destination);
6908  * not including the trailing NUL. Always guaranteed to leave NUL-terminated
6909  * buffer. On any error, return -EFAULT.
6910  */
6911 int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
6912 		       void *buf, int len, unsigned int gup_flags)
6913 {
6914 	struct mm_struct *mm;
6915 	int ret;
6916 
6917 	if (unlikely(len == 0))
6918 		return 0;
6919 
6920 	mm = get_task_mm(tsk);
6921 	if (!mm) {
6922 		*(char *)buf = '\0';
6923 		return -EFAULT;
6924 	}
6925 
6926 	ret = __copy_remote_vm_str(mm, addr, buf, len, gup_flags);
6927 
6928 	mmput(mm);
6929 
6930 	return ret;
6931 }
6932 EXPORT_SYMBOL_GPL(copy_remote_vm_str);
6933 #endif /* CONFIG_BPF_SYSCALL */
6934 
6935 /*
6936  * Print the name of a VMA.
6937  */
6938 void print_vma_addr(char *prefix, unsigned long ip)
6939 {
6940 	struct mm_struct *mm = current->mm;
6941 	struct vm_area_struct *vma;
6942 
6943 	/*
6944 	 * we might be running from an atomic context so we cannot sleep
6945 	 */
6946 	if (!mmap_read_trylock(mm))
6947 		return;
6948 
6949 	vma = vma_lookup(mm, ip);
6950 	if (vma && vma->vm_file) {
6951 		struct file *f = vma->vm_file;
6952 		ip -= vma->vm_start;
6953 		ip += vma->vm_pgoff << PAGE_SHIFT;
6954 		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
6955 				vma->vm_start,
6956 				vma->vm_end - vma->vm_start);
6957 	}
6958 	mmap_read_unlock(mm);
6959 }
6960 
6961 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6962 void __might_fault(const char *file, int line)
6963 {
6964 	if (pagefault_disabled())
6965 		return;
6966 	__might_sleep(file, line);
6967 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6968 	if (current->mm)
6969 		might_lock_read(&current->mm->mmap_lock);
6970 #endif
6971 }
6972 EXPORT_SYMBOL(__might_fault);
6973 #endif
6974 
6975 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
6976 /*
6977  * Process all subpages of the specified huge page with the specified
6978  * operation.  The target subpage will be processed last to keep its
6979  * cache lines hot.
6980  */
6981 static inline int process_huge_page(
6982 	unsigned long addr_hint, unsigned int nr_pages,
6983 	int (*process_subpage)(unsigned long addr, int idx, void *arg),
6984 	void *arg)
6985 {
6986 	int i, n, base, l, ret;
6987 	unsigned long addr = addr_hint &
6988 		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
6989 
6990 	/* Process target subpage last to keep its cache lines hot */
6991 	might_sleep();
6992 	n = (addr_hint - addr) / PAGE_SIZE;
6993 	if (2 * n <= nr_pages) {
6994 		/* If target subpage in first half of huge page */
6995 		base = 0;
6996 		l = n;
6997 		/* Process subpages at the end of huge page */
6998 		for (i = nr_pages - 1; i >= 2 * n; i--) {
6999 			cond_resched();
7000 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
7001 			if (ret)
7002 				return ret;
7003 		}
7004 	} else {
7005 		/* If target subpage in second half of huge page */
7006 		base = nr_pages - 2 * (nr_pages - n);
7007 		l = nr_pages - n;
7008 		/* Process subpages at the begin of huge page */
7009 		for (i = 0; i < base; i++) {
7010 			cond_resched();
7011 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
7012 			if (ret)
7013 				return ret;
7014 		}
7015 	}
7016 	/*
7017 	 * Process remaining subpages in left-right-left-right pattern
7018 	 * towards the target subpage
7019 	 */
7020 	for (i = 0; i < l; i++) {
7021 		int left_idx = base + i;
7022 		int right_idx = base + 2 * l - 1 - i;
7023 
7024 		cond_resched();
7025 		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
7026 		if (ret)
7027 			return ret;
7028 		cond_resched();
7029 		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
7030 		if (ret)
7031 			return ret;
7032 	}
7033 	return 0;
7034 }
7035 
7036 static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
7037 				unsigned int nr_pages)
7038 {
7039 	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
7040 	int i;
7041 
7042 	might_sleep();
7043 	for (i = 0; i < nr_pages; i++) {
7044 		cond_resched();
7045 		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
7046 	}
7047 }
7048 
7049 static int clear_subpage(unsigned long addr, int idx, void *arg)
7050 {
7051 	struct folio *folio = arg;
7052 
7053 	clear_user_highpage(folio_page(folio, idx), addr);
7054 	return 0;
7055 }
7056 
7057 /**
7058  * folio_zero_user - Zero a folio which will be mapped to userspace.
7059  * @folio: The folio to zero.
7060  * @addr_hint: The address will be accessed or the base address if uncelar.
7061  */
7062 void folio_zero_user(struct folio *folio, unsigned long addr_hint)
7063 {
7064 	unsigned int nr_pages = folio_nr_pages(folio);
7065 
7066 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
7067 		clear_gigantic_page(folio, addr_hint, nr_pages);
7068 	else
7069 		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
7070 }
7071 
7072 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
7073 				   unsigned long addr_hint,
7074 				   struct vm_area_struct *vma,
7075 				   unsigned int nr_pages)
7076 {
7077 	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
7078 	struct page *dst_page;
7079 	struct page *src_page;
7080 	int i;
7081 
7082 	for (i = 0; i < nr_pages; i++) {
7083 		dst_page = folio_page(dst, i);
7084 		src_page = folio_page(src, i);
7085 
7086 		cond_resched();
7087 		if (copy_mc_user_highpage(dst_page, src_page,
7088 					  addr + i*PAGE_SIZE, vma))
7089 			return -EHWPOISON;
7090 	}
7091 	return 0;
7092 }
7093 
7094 struct copy_subpage_arg {
7095 	struct folio *dst;
7096 	struct folio *src;
7097 	struct vm_area_struct *vma;
7098 };
7099 
7100 static int copy_subpage(unsigned long addr, int idx, void *arg)
7101 {
7102 	struct copy_subpage_arg *copy_arg = arg;
7103 	struct page *dst = folio_page(copy_arg->dst, idx);
7104 	struct page *src = folio_page(copy_arg->src, idx);
7105 
7106 	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
7107 		return -EHWPOISON;
7108 	return 0;
7109 }
7110 
7111 int copy_user_large_folio(struct folio *dst, struct folio *src,
7112 			  unsigned long addr_hint, struct vm_area_struct *vma)
7113 {
7114 	unsigned int nr_pages = folio_nr_pages(dst);
7115 	struct copy_subpage_arg arg = {
7116 		.dst = dst,
7117 		.src = src,
7118 		.vma = vma,
7119 	};
7120 
7121 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
7122 		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
7123 
7124 	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
7125 }
7126 
7127 long copy_folio_from_user(struct folio *dst_folio,
7128 			   const void __user *usr_src,
7129 			   bool allow_pagefault)
7130 {
7131 	void *kaddr;
7132 	unsigned long i, rc = 0;
7133 	unsigned int nr_pages = folio_nr_pages(dst_folio);
7134 	unsigned long ret_val = nr_pages * PAGE_SIZE;
7135 	struct page *subpage;
7136 
7137 	for (i = 0; i < nr_pages; i++) {
7138 		subpage = folio_page(dst_folio, i);
7139 		kaddr = kmap_local_page(subpage);
7140 		if (!allow_pagefault)
7141 			pagefault_disable();
7142 		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
7143 		if (!allow_pagefault)
7144 			pagefault_enable();
7145 		kunmap_local(kaddr);
7146 
7147 		ret_val -= (PAGE_SIZE - rc);
7148 		if (rc)
7149 			break;
7150 
7151 		flush_dcache_page(subpage);
7152 
7153 		cond_resched();
7154 	}
7155 	return ret_val;
7156 }
7157 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
7158 
7159 #if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
7160 
7161 static struct kmem_cache *page_ptl_cachep;
7162 
7163 void __init ptlock_cache_init(void)
7164 {
7165 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
7166 			SLAB_PANIC, NULL);
7167 }
7168 
7169 bool ptlock_alloc(struct ptdesc *ptdesc)
7170 {
7171 	spinlock_t *ptl;
7172 
7173 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
7174 	if (!ptl)
7175 		return false;
7176 	ptdesc->ptl = ptl;
7177 	return true;
7178 }
7179 
7180 void ptlock_free(struct ptdesc *ptdesc)
7181 {
7182 	if (ptdesc->ptl)
7183 		kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
7184 }
7185 #endif
7186 
7187 void vma_pgtable_walk_begin(struct vm_area_struct *vma)
7188 {
7189 	if (is_vm_hugetlb_page(vma))
7190 		hugetlb_vma_lock_read(vma);
7191 }
7192 
7193 void vma_pgtable_walk_end(struct vm_area_struct *vma)
7194 {
7195 	if (is_vm_hugetlb_page(vma))
7196 		hugetlb_vma_unlock_read(vma);
7197 }
7198