xref: /linux/mm/memory.c (revision 839c4f596f898edc424070dc8b517381572f8502)
1 
2 // SPDX-License-Identifier: GPL-2.0-only
3 /*
4  *  linux/mm/memory.c
5  *
6  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
7  */
8 
9 /*
10  * demand-loading started 01.12.91 - seems it is high on the list of
11  * things wanted, and it should be easy to implement. - Linus
12  */
13 
14 /*
15  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
16  * pages started 02.12.91, seems to work. - Linus.
17  *
18  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
19  * would have taken more than the 6M I have free, but it worked well as
20  * far as I could see.
21  *
22  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
23  */
24 
25 /*
26  * Real VM (paging to/from disk) started 18.12.91. Much more work and
27  * thought has to go into this. Oh, well..
28  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
29  *		Found it. Everything seems to work now.
30  * 20.12.91  -  Ok, making the swap-device changeable like the root.
31  */
32 
33 /*
34  * 05.04.94  -  Multi-page memory management added for v1.1.
35  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
36  *
37  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
38  *		(Gerhard.Wichert@pdb.siemens.de)
39  *
40  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
41  */
42 
43 #include <linux/kernel_stat.h>
44 #include <linux/mm.h>
45 #include <linux/mm_inline.h>
46 #include <linux/sched/mm.h>
47 #include <linux/sched/coredump.h>
48 #include <linux/sched/numa_balancing.h>
49 #include <linux/sched/task.h>
50 #include <linux/hugetlb.h>
51 #include <linux/mman.h>
52 #include <linux/swap.h>
53 #include <linux/highmem.h>
54 #include <linux/pagemap.h>
55 #include <linux/memremap.h>
56 #include <linux/kmsan.h>
57 #include <linux/ksm.h>
58 #include <linux/rmap.h>
59 #include <linux/export.h>
60 #include <linux/delayacct.h>
61 #include <linux/init.h>
62 #include <linux/pfn_t.h>
63 #include <linux/writeback.h>
64 #include <linux/memcontrol.h>
65 #include <linux/mmu_notifier.h>
66 #include <linux/swapops.h>
67 #include <linux/elf.h>
68 #include <linux/gfp.h>
69 #include <linux/migrate.h>
70 #include <linux/string.h>
71 #include <linux/memory-tiers.h>
72 #include <linux/debugfs.h>
73 #include <linux/userfaultfd_k.h>
74 #include <linux/dax.h>
75 #include <linux/oom.h>
76 #include <linux/numa.h>
77 #include <linux/perf_event.h>
78 #include <linux/ptrace.h>
79 #include <linux/vmalloc.h>
80 #include <linux/sched/sysctl.h>
81 
82 #include <trace/events/kmem.h>
83 
84 #include <asm/io.h>
85 #include <asm/mmu_context.h>
86 #include <asm/pgalloc.h>
87 #include <linux/uaccess.h>
88 #include <asm/tlb.h>
89 #include <asm/tlbflush.h>
90 
91 #include "pgalloc-track.h"
92 #include "internal.h"
93 #include "swap.h"
94 
95 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
96 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
97 #endif
98 
99 #ifndef CONFIG_NUMA
100 unsigned long max_mapnr;
101 EXPORT_SYMBOL(max_mapnr);
102 
103 struct page *mem_map;
104 EXPORT_SYMBOL(mem_map);
105 #endif
106 
107 static vm_fault_t do_fault(struct vm_fault *vmf);
108 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
109 static bool vmf_pte_changed(struct vm_fault *vmf);
110 
111 /*
112  * Return true if the original pte was a uffd-wp pte marker (so the pte was
113  * wr-protected).
114  */
115 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
116 {
117 	if (!userfaultfd_wp(vmf->vma))
118 		return false;
119 	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
120 		return false;
121 
122 	return pte_marker_uffd_wp(vmf->orig_pte);
123 }
124 
125 /*
126  * A number of key systems in x86 including ioremap() rely on the assumption
127  * that high_memory defines the upper bound on direct map memory, then end
128  * of ZONE_NORMAL.
129  */
130 void *high_memory;
131 EXPORT_SYMBOL(high_memory);
132 
133 /*
134  * Randomize the address space (stacks, mmaps, brk, etc.).
135  *
136  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
137  *   as ancient (libc5 based) binaries can segfault. )
138  */
139 int randomize_va_space __read_mostly =
140 #ifdef CONFIG_COMPAT_BRK
141 					1;
142 #else
143 					2;
144 #endif
145 
146 #ifndef arch_wants_old_prefaulted_pte
147 static inline bool arch_wants_old_prefaulted_pte(void)
148 {
149 	/*
150 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
151 	 * some architectures, even if it's performed in hardware. By
152 	 * default, "false" means prefaulted entries will be 'young'.
153 	 */
154 	return false;
155 }
156 #endif
157 
158 static int __init disable_randmaps(char *s)
159 {
160 	randomize_va_space = 0;
161 	return 1;
162 }
163 __setup("norandmaps", disable_randmaps);
164 
165 unsigned long zero_pfn __read_mostly;
166 EXPORT_SYMBOL(zero_pfn);
167 
168 unsigned long highest_memmap_pfn __read_mostly;
169 
170 /*
171  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
172  */
173 static int __init init_zero_pfn(void)
174 {
175 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
176 	return 0;
177 }
178 early_initcall(init_zero_pfn);
179 
180 void mm_trace_rss_stat(struct mm_struct *mm, int member)
181 {
182 	trace_rss_stat(mm, member);
183 }
184 
185 /*
186  * Note: this doesn't free the actual pages themselves. That
187  * has been handled earlier when unmapping all the memory regions.
188  */
189 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
190 			   unsigned long addr)
191 {
192 	pgtable_t token = pmd_pgtable(*pmd);
193 	pmd_clear(pmd);
194 	pte_free_tlb(tlb, token, addr);
195 	mm_dec_nr_ptes(tlb->mm);
196 }
197 
198 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
199 				unsigned long addr, unsigned long end,
200 				unsigned long floor, unsigned long ceiling)
201 {
202 	pmd_t *pmd;
203 	unsigned long next;
204 	unsigned long start;
205 
206 	start = addr;
207 	pmd = pmd_offset(pud, addr);
208 	do {
209 		next = pmd_addr_end(addr, end);
210 		if (pmd_none_or_clear_bad(pmd))
211 			continue;
212 		free_pte_range(tlb, pmd, addr);
213 	} while (pmd++, addr = next, addr != end);
214 
215 	start &= PUD_MASK;
216 	if (start < floor)
217 		return;
218 	if (ceiling) {
219 		ceiling &= PUD_MASK;
220 		if (!ceiling)
221 			return;
222 	}
223 	if (end - 1 > ceiling - 1)
224 		return;
225 
226 	pmd = pmd_offset(pud, start);
227 	pud_clear(pud);
228 	pmd_free_tlb(tlb, pmd, start);
229 	mm_dec_nr_pmds(tlb->mm);
230 }
231 
232 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
233 				unsigned long addr, unsigned long end,
234 				unsigned long floor, unsigned long ceiling)
235 {
236 	pud_t *pud;
237 	unsigned long next;
238 	unsigned long start;
239 
240 	start = addr;
241 	pud = pud_offset(p4d, addr);
242 	do {
243 		next = pud_addr_end(addr, end);
244 		if (pud_none_or_clear_bad(pud))
245 			continue;
246 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
247 	} while (pud++, addr = next, addr != end);
248 
249 	start &= P4D_MASK;
250 	if (start < floor)
251 		return;
252 	if (ceiling) {
253 		ceiling &= P4D_MASK;
254 		if (!ceiling)
255 			return;
256 	}
257 	if (end - 1 > ceiling - 1)
258 		return;
259 
260 	pud = pud_offset(p4d, start);
261 	p4d_clear(p4d);
262 	pud_free_tlb(tlb, pud, start);
263 	mm_dec_nr_puds(tlb->mm);
264 }
265 
266 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
267 				unsigned long addr, unsigned long end,
268 				unsigned long floor, unsigned long ceiling)
269 {
270 	p4d_t *p4d;
271 	unsigned long next;
272 	unsigned long start;
273 
274 	start = addr;
275 	p4d = p4d_offset(pgd, addr);
276 	do {
277 		next = p4d_addr_end(addr, end);
278 		if (p4d_none_or_clear_bad(p4d))
279 			continue;
280 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
281 	} while (p4d++, addr = next, addr != end);
282 
283 	start &= PGDIR_MASK;
284 	if (start < floor)
285 		return;
286 	if (ceiling) {
287 		ceiling &= PGDIR_MASK;
288 		if (!ceiling)
289 			return;
290 	}
291 	if (end - 1 > ceiling - 1)
292 		return;
293 
294 	p4d = p4d_offset(pgd, start);
295 	pgd_clear(pgd);
296 	p4d_free_tlb(tlb, p4d, start);
297 }
298 
299 /*
300  * This function frees user-level page tables of a process.
301  */
302 void free_pgd_range(struct mmu_gather *tlb,
303 			unsigned long addr, unsigned long end,
304 			unsigned long floor, unsigned long ceiling)
305 {
306 	pgd_t *pgd;
307 	unsigned long next;
308 
309 	/*
310 	 * The next few lines have given us lots of grief...
311 	 *
312 	 * Why are we testing PMD* at this top level?  Because often
313 	 * there will be no work to do at all, and we'd prefer not to
314 	 * go all the way down to the bottom just to discover that.
315 	 *
316 	 * Why all these "- 1"s?  Because 0 represents both the bottom
317 	 * of the address space and the top of it (using -1 for the
318 	 * top wouldn't help much: the masks would do the wrong thing).
319 	 * The rule is that addr 0 and floor 0 refer to the bottom of
320 	 * the address space, but end 0 and ceiling 0 refer to the top
321 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
322 	 * that end 0 case should be mythical).
323 	 *
324 	 * Wherever addr is brought up or ceiling brought down, we must
325 	 * be careful to reject "the opposite 0" before it confuses the
326 	 * subsequent tests.  But what about where end is brought down
327 	 * by PMD_SIZE below? no, end can't go down to 0 there.
328 	 *
329 	 * Whereas we round start (addr) and ceiling down, by different
330 	 * masks at different levels, in order to test whether a table
331 	 * now has no other vmas using it, so can be freed, we don't
332 	 * bother to round floor or end up - the tests don't need that.
333 	 */
334 
335 	addr &= PMD_MASK;
336 	if (addr < floor) {
337 		addr += PMD_SIZE;
338 		if (!addr)
339 			return;
340 	}
341 	if (ceiling) {
342 		ceiling &= PMD_MASK;
343 		if (!ceiling)
344 			return;
345 	}
346 	if (end - 1 > ceiling - 1)
347 		end -= PMD_SIZE;
348 	if (addr > end - 1)
349 		return;
350 	/*
351 	 * We add page table cache pages with PAGE_SIZE,
352 	 * (see pte_free_tlb()), flush the tlb if we need
353 	 */
354 	tlb_change_page_size(tlb, PAGE_SIZE);
355 	pgd = pgd_offset(tlb->mm, addr);
356 	do {
357 		next = pgd_addr_end(addr, end);
358 		if (pgd_none_or_clear_bad(pgd))
359 			continue;
360 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
361 	} while (pgd++, addr = next, addr != end);
362 }
363 
364 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
365 		   struct vm_area_struct *vma, unsigned long floor,
366 		   unsigned long ceiling, bool mm_wr_locked)
367 {
368 	struct unlink_vma_file_batch vb;
369 
370 	do {
371 		unsigned long addr = vma->vm_start;
372 		struct vm_area_struct *next;
373 
374 		/*
375 		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
376 		 * be 0.  This will underflow and is okay.
377 		 */
378 		next = mas_find(mas, ceiling - 1);
379 		if (unlikely(xa_is_zero(next)))
380 			next = NULL;
381 
382 		/*
383 		 * Hide vma from rmap and truncate_pagecache before freeing
384 		 * pgtables
385 		 */
386 		if (mm_wr_locked)
387 			vma_start_write(vma);
388 		unlink_anon_vmas(vma);
389 
390 		if (is_vm_hugetlb_page(vma)) {
391 			unlink_file_vma(vma);
392 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
393 				floor, next ? next->vm_start : ceiling);
394 		} else {
395 			unlink_file_vma_batch_init(&vb);
396 			unlink_file_vma_batch_add(&vb, vma);
397 
398 			/*
399 			 * Optimization: gather nearby vmas into one call down
400 			 */
401 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
402 			       && !is_vm_hugetlb_page(next)) {
403 				vma = next;
404 				next = mas_find(mas, ceiling - 1);
405 				if (unlikely(xa_is_zero(next)))
406 					next = NULL;
407 				if (mm_wr_locked)
408 					vma_start_write(vma);
409 				unlink_anon_vmas(vma);
410 				unlink_file_vma_batch_add(&vb, vma);
411 			}
412 			unlink_file_vma_batch_final(&vb);
413 			free_pgd_range(tlb, addr, vma->vm_end,
414 				floor, next ? next->vm_start : ceiling);
415 		}
416 		vma = next;
417 	} while (vma);
418 }
419 
420 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
421 {
422 	spinlock_t *ptl = pmd_lock(mm, pmd);
423 
424 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
425 		mm_inc_nr_ptes(mm);
426 		/*
427 		 * Ensure all pte setup (eg. pte page lock and page clearing) are
428 		 * visible before the pte is made visible to other CPUs by being
429 		 * put into page tables.
430 		 *
431 		 * The other side of the story is the pointer chasing in the page
432 		 * table walking code (when walking the page table without locking;
433 		 * ie. most of the time). Fortunately, these data accesses consist
434 		 * of a chain of data-dependent loads, meaning most CPUs (alpha
435 		 * being the notable exception) will already guarantee loads are
436 		 * seen in-order. See the alpha page table accessors for the
437 		 * smp_rmb() barriers in page table walking code.
438 		 */
439 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
440 		pmd_populate(mm, pmd, *pte);
441 		*pte = NULL;
442 	}
443 	spin_unlock(ptl);
444 }
445 
446 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
447 {
448 	pgtable_t new = pte_alloc_one(mm);
449 	if (!new)
450 		return -ENOMEM;
451 
452 	pmd_install(mm, pmd, &new);
453 	if (new)
454 		pte_free(mm, new);
455 	return 0;
456 }
457 
458 int __pte_alloc_kernel(pmd_t *pmd)
459 {
460 	pte_t *new = pte_alloc_one_kernel(&init_mm);
461 	if (!new)
462 		return -ENOMEM;
463 
464 	spin_lock(&init_mm.page_table_lock);
465 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
466 		smp_wmb(); /* See comment in pmd_install() */
467 		pmd_populate_kernel(&init_mm, pmd, new);
468 		new = NULL;
469 	}
470 	spin_unlock(&init_mm.page_table_lock);
471 	if (new)
472 		pte_free_kernel(&init_mm, new);
473 	return 0;
474 }
475 
476 static inline void init_rss_vec(int *rss)
477 {
478 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
479 }
480 
481 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
482 {
483 	int i;
484 
485 	for (i = 0; i < NR_MM_COUNTERS; i++)
486 		if (rss[i])
487 			add_mm_counter(mm, i, rss[i]);
488 }
489 
490 /*
491  * This function is called to print an error when a bad pte
492  * is found. For example, we might have a PFN-mapped pte in
493  * a region that doesn't allow it.
494  *
495  * The calling function must still handle the error.
496  */
497 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
498 			  pte_t pte, struct page *page)
499 {
500 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
501 	p4d_t *p4d = p4d_offset(pgd, addr);
502 	pud_t *pud = pud_offset(p4d, addr);
503 	pmd_t *pmd = pmd_offset(pud, addr);
504 	struct address_space *mapping;
505 	pgoff_t index;
506 	static unsigned long resume;
507 	static unsigned long nr_shown;
508 	static unsigned long nr_unshown;
509 
510 	/*
511 	 * Allow a burst of 60 reports, then keep quiet for that minute;
512 	 * or allow a steady drip of one report per second.
513 	 */
514 	if (nr_shown == 60) {
515 		if (time_before(jiffies, resume)) {
516 			nr_unshown++;
517 			return;
518 		}
519 		if (nr_unshown) {
520 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
521 				 nr_unshown);
522 			nr_unshown = 0;
523 		}
524 		nr_shown = 0;
525 	}
526 	if (nr_shown++ == 0)
527 		resume = jiffies + 60 * HZ;
528 
529 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
530 	index = linear_page_index(vma, addr);
531 
532 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
533 		 current->comm,
534 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
535 	if (page)
536 		dump_page(page, "bad pte");
537 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
538 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
539 	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
540 		 vma->vm_file,
541 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
542 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
543 		 mapping ? mapping->a_ops->read_folio : NULL);
544 	dump_stack();
545 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
546 }
547 
548 /*
549  * vm_normal_page -- This function gets the "struct page" associated with a pte.
550  *
551  * "Special" mappings do not wish to be associated with a "struct page" (either
552  * it doesn't exist, or it exists but they don't want to touch it). In this
553  * case, NULL is returned here. "Normal" mappings do have a struct page.
554  *
555  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
556  * pte bit, in which case this function is trivial. Secondly, an architecture
557  * may not have a spare pte bit, which requires a more complicated scheme,
558  * described below.
559  *
560  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
561  * special mapping (even if there are underlying and valid "struct pages").
562  * COWed pages of a VM_PFNMAP are always normal.
563  *
564  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
565  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
566  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
567  * mapping will always honor the rule
568  *
569  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
570  *
571  * And for normal mappings this is false.
572  *
573  * This restricts such mappings to be a linear translation from virtual address
574  * to pfn. To get around this restriction, we allow arbitrary mappings so long
575  * as the vma is not a COW mapping; in that case, we know that all ptes are
576  * special (because none can have been COWed).
577  *
578  *
579  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
580  *
581  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
582  * page" backing, however the difference is that _all_ pages with a struct
583  * page (that is, those where pfn_valid is true) are refcounted and considered
584  * normal pages by the VM. The only exception are zeropages, which are
585  * *never* refcounted.
586  *
587  * The disadvantage is that pages are refcounted (which can be slower and
588  * simply not an option for some PFNMAP users). The advantage is that we
589  * don't have to follow the strict linearity rule of PFNMAP mappings in
590  * order to support COWable mappings.
591  *
592  */
593 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
594 			    pte_t pte)
595 {
596 	unsigned long pfn = pte_pfn(pte);
597 
598 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
599 		if (likely(!pte_special(pte)))
600 			goto check_pfn;
601 		if (vma->vm_ops && vma->vm_ops->find_special_page)
602 			return vma->vm_ops->find_special_page(vma, addr);
603 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
604 			return NULL;
605 		if (is_zero_pfn(pfn))
606 			return NULL;
607 		if (pte_devmap(pte))
608 		/*
609 		 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
610 		 * and will have refcounts incremented on their struct pages
611 		 * when they are inserted into PTEs, thus they are safe to
612 		 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
613 		 * do not have refcounts. Example of legacy ZONE_DEVICE is
614 		 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
615 		 */
616 			return NULL;
617 
618 		print_bad_pte(vma, addr, pte, NULL);
619 		return NULL;
620 	}
621 
622 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
623 
624 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
625 		if (vma->vm_flags & VM_MIXEDMAP) {
626 			if (!pfn_valid(pfn))
627 				return NULL;
628 			if (is_zero_pfn(pfn))
629 				return NULL;
630 			goto out;
631 		} else {
632 			unsigned long off;
633 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
634 			if (pfn == vma->vm_pgoff + off)
635 				return NULL;
636 			if (!is_cow_mapping(vma->vm_flags))
637 				return NULL;
638 		}
639 	}
640 
641 	if (is_zero_pfn(pfn))
642 		return NULL;
643 
644 check_pfn:
645 	if (unlikely(pfn > highest_memmap_pfn)) {
646 		print_bad_pte(vma, addr, pte, NULL);
647 		return NULL;
648 	}
649 
650 	/*
651 	 * NOTE! We still have PageReserved() pages in the page tables.
652 	 * eg. VDSO mappings can cause them to exist.
653 	 */
654 out:
655 	VM_WARN_ON_ONCE(is_zero_pfn(pfn));
656 	return pfn_to_page(pfn);
657 }
658 
659 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
660 			    pte_t pte)
661 {
662 	struct page *page = vm_normal_page(vma, addr, pte);
663 
664 	if (page)
665 		return page_folio(page);
666 	return NULL;
667 }
668 
669 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
670 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
671 				pmd_t pmd)
672 {
673 	unsigned long pfn = pmd_pfn(pmd);
674 
675 	/*
676 	 * There is no pmd_special() but there may be special pmds, e.g.
677 	 * in a direct-access (dax) mapping, so let's just replicate the
678 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
679 	 */
680 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
681 		if (vma->vm_flags & VM_MIXEDMAP) {
682 			if (!pfn_valid(pfn))
683 				return NULL;
684 			goto out;
685 		} else {
686 			unsigned long off;
687 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
688 			if (pfn == vma->vm_pgoff + off)
689 				return NULL;
690 			if (!is_cow_mapping(vma->vm_flags))
691 				return NULL;
692 		}
693 	}
694 
695 	if (pmd_devmap(pmd))
696 		return NULL;
697 	if (is_huge_zero_pmd(pmd))
698 		return NULL;
699 	if (unlikely(pfn > highest_memmap_pfn))
700 		return NULL;
701 
702 	/*
703 	 * NOTE! We still have PageReserved() pages in the page tables.
704 	 * eg. VDSO mappings can cause them to exist.
705 	 */
706 out:
707 	return pfn_to_page(pfn);
708 }
709 
710 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
711 				  unsigned long addr, pmd_t pmd)
712 {
713 	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
714 
715 	if (page)
716 		return page_folio(page);
717 	return NULL;
718 }
719 #endif
720 
721 static void restore_exclusive_pte(struct vm_area_struct *vma,
722 				  struct page *page, unsigned long address,
723 				  pte_t *ptep)
724 {
725 	struct folio *folio = page_folio(page);
726 	pte_t orig_pte;
727 	pte_t pte;
728 	swp_entry_t entry;
729 
730 	orig_pte = ptep_get(ptep);
731 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
732 	if (pte_swp_soft_dirty(orig_pte))
733 		pte = pte_mksoft_dirty(pte);
734 
735 	entry = pte_to_swp_entry(orig_pte);
736 	if (pte_swp_uffd_wp(orig_pte))
737 		pte = pte_mkuffd_wp(pte);
738 	else if (is_writable_device_exclusive_entry(entry))
739 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
740 
741 	VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
742 					   PageAnonExclusive(page)), folio);
743 
744 	/*
745 	 * No need to take a page reference as one was already
746 	 * created when the swap entry was made.
747 	 */
748 	if (folio_test_anon(folio))
749 		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
750 	else
751 		/*
752 		 * Currently device exclusive access only supports anonymous
753 		 * memory so the entry shouldn't point to a filebacked page.
754 		 */
755 		WARN_ON_ONCE(1);
756 
757 	set_pte_at(vma->vm_mm, address, ptep, pte);
758 
759 	/*
760 	 * No need to invalidate - it was non-present before. However
761 	 * secondary CPUs may have mappings that need invalidating.
762 	 */
763 	update_mmu_cache(vma, address, ptep);
764 }
765 
766 /*
767  * Tries to restore an exclusive pte if the page lock can be acquired without
768  * sleeping.
769  */
770 static int
771 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
772 			unsigned long addr)
773 {
774 	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
775 	struct page *page = pfn_swap_entry_to_page(entry);
776 
777 	if (trylock_page(page)) {
778 		restore_exclusive_pte(vma, page, addr, src_pte);
779 		unlock_page(page);
780 		return 0;
781 	}
782 
783 	return -EBUSY;
784 }
785 
786 /*
787  * copy one vm_area from one task to the other. Assumes the page tables
788  * already present in the new task to be cleared in the whole range
789  * covered by this vma.
790  */
791 
792 static unsigned long
793 copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
794 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
795 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
796 {
797 	unsigned long vm_flags = dst_vma->vm_flags;
798 	pte_t orig_pte = ptep_get(src_pte);
799 	pte_t pte = orig_pte;
800 	struct folio *folio;
801 	struct page *page;
802 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
803 
804 	if (likely(!non_swap_entry(entry))) {
805 		if (swap_duplicate(entry) < 0)
806 			return -EIO;
807 
808 		/* make sure dst_mm is on swapoff's mmlist. */
809 		if (unlikely(list_empty(&dst_mm->mmlist))) {
810 			spin_lock(&mmlist_lock);
811 			if (list_empty(&dst_mm->mmlist))
812 				list_add(&dst_mm->mmlist,
813 						&src_mm->mmlist);
814 			spin_unlock(&mmlist_lock);
815 		}
816 		/* Mark the swap entry as shared. */
817 		if (pte_swp_exclusive(orig_pte)) {
818 			pte = pte_swp_clear_exclusive(orig_pte);
819 			set_pte_at(src_mm, addr, src_pte, pte);
820 		}
821 		rss[MM_SWAPENTS]++;
822 	} else if (is_migration_entry(entry)) {
823 		folio = pfn_swap_entry_folio(entry);
824 
825 		rss[mm_counter(folio)]++;
826 
827 		if (!is_readable_migration_entry(entry) &&
828 				is_cow_mapping(vm_flags)) {
829 			/*
830 			 * COW mappings require pages in both parent and child
831 			 * to be set to read. A previously exclusive entry is
832 			 * now shared.
833 			 */
834 			entry = make_readable_migration_entry(
835 							swp_offset(entry));
836 			pte = swp_entry_to_pte(entry);
837 			if (pte_swp_soft_dirty(orig_pte))
838 				pte = pte_swp_mksoft_dirty(pte);
839 			if (pte_swp_uffd_wp(orig_pte))
840 				pte = pte_swp_mkuffd_wp(pte);
841 			set_pte_at(src_mm, addr, src_pte, pte);
842 		}
843 	} else if (is_device_private_entry(entry)) {
844 		page = pfn_swap_entry_to_page(entry);
845 		folio = page_folio(page);
846 
847 		/*
848 		 * Update rss count even for unaddressable pages, as
849 		 * they should treated just like normal pages in this
850 		 * respect.
851 		 *
852 		 * We will likely want to have some new rss counters
853 		 * for unaddressable pages, at some point. But for now
854 		 * keep things as they are.
855 		 */
856 		folio_get(folio);
857 		rss[mm_counter(folio)]++;
858 		/* Cannot fail as these pages cannot get pinned. */
859 		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
860 
861 		/*
862 		 * We do not preserve soft-dirty information, because so
863 		 * far, checkpoint/restore is the only feature that
864 		 * requires that. And checkpoint/restore does not work
865 		 * when a device driver is involved (you cannot easily
866 		 * save and restore device driver state).
867 		 */
868 		if (is_writable_device_private_entry(entry) &&
869 		    is_cow_mapping(vm_flags)) {
870 			entry = make_readable_device_private_entry(
871 							swp_offset(entry));
872 			pte = swp_entry_to_pte(entry);
873 			if (pte_swp_uffd_wp(orig_pte))
874 				pte = pte_swp_mkuffd_wp(pte);
875 			set_pte_at(src_mm, addr, src_pte, pte);
876 		}
877 	} else if (is_device_exclusive_entry(entry)) {
878 		/*
879 		 * Make device exclusive entries present by restoring the
880 		 * original entry then copying as for a present pte. Device
881 		 * exclusive entries currently only support private writable
882 		 * (ie. COW) mappings.
883 		 */
884 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
885 		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
886 			return -EBUSY;
887 		return -ENOENT;
888 	} else if (is_pte_marker_entry(entry)) {
889 		pte_marker marker = copy_pte_marker(entry, dst_vma);
890 
891 		if (marker)
892 			set_pte_at(dst_mm, addr, dst_pte,
893 				   make_pte_marker(marker));
894 		return 0;
895 	}
896 	if (!userfaultfd_wp(dst_vma))
897 		pte = pte_swp_clear_uffd_wp(pte);
898 	set_pte_at(dst_mm, addr, dst_pte, pte);
899 	return 0;
900 }
901 
902 /*
903  * Copy a present and normal page.
904  *
905  * NOTE! The usual case is that this isn't required;
906  * instead, the caller can just increase the page refcount
907  * and re-use the pte the traditional way.
908  *
909  * And if we need a pre-allocated page but don't yet have
910  * one, return a negative error to let the preallocation
911  * code know so that it can do so outside the page table
912  * lock.
913  */
914 static inline int
915 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
916 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
917 		  struct folio **prealloc, struct page *page)
918 {
919 	struct folio *new_folio;
920 	pte_t pte;
921 
922 	new_folio = *prealloc;
923 	if (!new_folio)
924 		return -EAGAIN;
925 
926 	/*
927 	 * We have a prealloc page, all good!  Take it
928 	 * over and copy the page & arm it.
929 	 */
930 	*prealloc = NULL;
931 	copy_user_highpage(&new_folio->page, page, addr, src_vma);
932 	__folio_mark_uptodate(new_folio);
933 	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
934 	folio_add_lru_vma(new_folio, dst_vma);
935 	rss[MM_ANONPAGES]++;
936 
937 	/* All done, just insert the new page copy in the child */
938 	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
939 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
940 	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
941 		/* Uffd-wp needs to be delivered to dest pte as well */
942 		pte = pte_mkuffd_wp(pte);
943 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
944 	return 0;
945 }
946 
947 static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
948 		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
949 		pte_t pte, unsigned long addr, int nr)
950 {
951 	struct mm_struct *src_mm = src_vma->vm_mm;
952 
953 	/* If it's a COW mapping, write protect it both processes. */
954 	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
955 		wrprotect_ptes(src_mm, addr, src_pte, nr);
956 		pte = pte_wrprotect(pte);
957 	}
958 
959 	/* If it's a shared mapping, mark it clean in the child. */
960 	if (src_vma->vm_flags & VM_SHARED)
961 		pte = pte_mkclean(pte);
962 	pte = pte_mkold(pte);
963 
964 	if (!userfaultfd_wp(dst_vma))
965 		pte = pte_clear_uffd_wp(pte);
966 
967 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
968 }
969 
970 /*
971  * Copy one present PTE, trying to batch-process subsequent PTEs that map
972  * consecutive pages of the same folio by copying them as well.
973  *
974  * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
975  * Otherwise, returns the number of copied PTEs (at least 1).
976  */
977 static inline int
978 copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
979 		 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
980 		 int max_nr, int *rss, struct folio **prealloc)
981 {
982 	struct page *page;
983 	struct folio *folio;
984 	bool any_writable;
985 	fpb_t flags = 0;
986 	int err, nr;
987 
988 	page = vm_normal_page(src_vma, addr, pte);
989 	if (unlikely(!page))
990 		goto copy_pte;
991 
992 	folio = page_folio(page);
993 
994 	/*
995 	 * If we likely have to copy, just don't bother with batching. Make
996 	 * sure that the common "small folio" case is as fast as possible
997 	 * by keeping the batching logic separate.
998 	 */
999 	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
1000 		if (src_vma->vm_flags & VM_SHARED)
1001 			flags |= FPB_IGNORE_DIRTY;
1002 		if (!vma_soft_dirty_enabled(src_vma))
1003 			flags |= FPB_IGNORE_SOFT_DIRTY;
1004 
1005 		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
1006 				     &any_writable, NULL, NULL);
1007 		folio_ref_add(folio, nr);
1008 		if (folio_test_anon(folio)) {
1009 			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
1010 								  nr, src_vma))) {
1011 				folio_ref_sub(folio, nr);
1012 				return -EAGAIN;
1013 			}
1014 			rss[MM_ANONPAGES] += nr;
1015 			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1016 		} else {
1017 			folio_dup_file_rmap_ptes(folio, page, nr);
1018 			rss[mm_counter_file(folio)] += nr;
1019 		}
1020 		if (any_writable)
1021 			pte = pte_mkwrite(pte, src_vma);
1022 		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
1023 				    addr, nr);
1024 		return nr;
1025 	}
1026 
1027 	folio_get(folio);
1028 	if (folio_test_anon(folio)) {
1029 		/*
1030 		 * If this page may have been pinned by the parent process,
1031 		 * copy the page immediately for the child so that we'll always
1032 		 * guarantee the pinned page won't be randomly replaced in the
1033 		 * future.
1034 		 */
1035 		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
1036 			/* Page may be pinned, we have to copy. */
1037 			folio_put(folio);
1038 			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
1039 						addr, rss, prealloc, page);
1040 			return err ? err : 1;
1041 		}
1042 		rss[MM_ANONPAGES]++;
1043 		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
1044 	} else {
1045 		folio_dup_file_rmap_pte(folio, page);
1046 		rss[mm_counter_file(folio)]++;
1047 	}
1048 
1049 copy_pte:
1050 	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
1051 	return 1;
1052 }
1053 
1054 static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
1055 		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
1056 {
1057 	struct folio *new_folio;
1058 
1059 	if (need_zero)
1060 		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
1061 	else
1062 		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
1063 					    addr, false);
1064 
1065 	if (!new_folio)
1066 		return NULL;
1067 
1068 	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
1069 		folio_put(new_folio);
1070 		return NULL;
1071 	}
1072 	folio_throttle_swaprate(new_folio, GFP_KERNEL);
1073 
1074 	return new_folio;
1075 }
1076 
1077 static int
1078 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1079 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1080 	       unsigned long end)
1081 {
1082 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1083 	struct mm_struct *src_mm = src_vma->vm_mm;
1084 	pte_t *orig_src_pte, *orig_dst_pte;
1085 	pte_t *src_pte, *dst_pte;
1086 	pte_t ptent;
1087 	spinlock_t *src_ptl, *dst_ptl;
1088 	int progress, max_nr, ret = 0;
1089 	int rss[NR_MM_COUNTERS];
1090 	swp_entry_t entry = (swp_entry_t){0};
1091 	struct folio *prealloc = NULL;
1092 	int nr;
1093 
1094 again:
1095 	progress = 0;
1096 	init_rss_vec(rss);
1097 
1098 	/*
1099 	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
1100 	 * error handling here, assume that exclusive mmap_lock on dst and src
1101 	 * protects anon from unexpected THP transitions; with shmem and file
1102 	 * protected by mmap_lock-less collapse skipping areas with anon_vma
1103 	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
1104 	 * can remove such assumptions later, but this is good enough for now.
1105 	 */
1106 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1107 	if (!dst_pte) {
1108 		ret = -ENOMEM;
1109 		goto out;
1110 	}
1111 	src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);
1112 	if (!src_pte) {
1113 		pte_unmap_unlock(dst_pte, dst_ptl);
1114 		/* ret == 0 */
1115 		goto out;
1116 	}
1117 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1118 	orig_src_pte = src_pte;
1119 	orig_dst_pte = dst_pte;
1120 	arch_enter_lazy_mmu_mode();
1121 
1122 	do {
1123 		nr = 1;
1124 
1125 		/*
1126 		 * We are holding two locks at this point - either of them
1127 		 * could generate latencies in another task on another CPU.
1128 		 */
1129 		if (progress >= 32) {
1130 			progress = 0;
1131 			if (need_resched() ||
1132 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1133 				break;
1134 		}
1135 		ptent = ptep_get(src_pte);
1136 		if (pte_none(ptent)) {
1137 			progress++;
1138 			continue;
1139 		}
1140 		if (unlikely(!pte_present(ptent))) {
1141 			ret = copy_nonpresent_pte(dst_mm, src_mm,
1142 						  dst_pte, src_pte,
1143 						  dst_vma, src_vma,
1144 						  addr, rss);
1145 			if (ret == -EIO) {
1146 				entry = pte_to_swp_entry(ptep_get(src_pte));
1147 				break;
1148 			} else if (ret == -EBUSY) {
1149 				break;
1150 			} else if (!ret) {
1151 				progress += 8;
1152 				continue;
1153 			}
1154 			ptent = ptep_get(src_pte);
1155 			VM_WARN_ON_ONCE(!pte_present(ptent));
1156 
1157 			/*
1158 			 * Device exclusive entry restored, continue by copying
1159 			 * the now present pte.
1160 			 */
1161 			WARN_ON_ONCE(ret != -ENOENT);
1162 		}
1163 		/* copy_present_ptes() will clear `*prealloc' if consumed */
1164 		max_nr = (end - addr) / PAGE_SIZE;
1165 		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
1166 					ptent, addr, max_nr, rss, &prealloc);
1167 		/*
1168 		 * If we need a pre-allocated page for this pte, drop the
1169 		 * locks, allocate, and try again.
1170 		 */
1171 		if (unlikely(ret == -EAGAIN))
1172 			break;
1173 		if (unlikely(prealloc)) {
1174 			/*
1175 			 * pre-alloc page cannot be reused by next time so as
1176 			 * to strictly follow mempolicy (e.g., alloc_page_vma()
1177 			 * will allocate page according to address).  This
1178 			 * could only happen if one pinned pte changed.
1179 			 */
1180 			folio_put(prealloc);
1181 			prealloc = NULL;
1182 		}
1183 		nr = ret;
1184 		progress += 8 * nr;
1185 	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
1186 		 addr != end);
1187 
1188 	arch_leave_lazy_mmu_mode();
1189 	pte_unmap_unlock(orig_src_pte, src_ptl);
1190 	add_mm_rss_vec(dst_mm, rss);
1191 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1192 	cond_resched();
1193 
1194 	if (ret == -EIO) {
1195 		VM_WARN_ON_ONCE(!entry.val);
1196 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1197 			ret = -ENOMEM;
1198 			goto out;
1199 		}
1200 		entry.val = 0;
1201 	} else if (ret == -EBUSY) {
1202 		goto out;
1203 	} else if (ret ==  -EAGAIN) {
1204 		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
1205 		if (!prealloc)
1206 			return -ENOMEM;
1207 	} else if (ret < 0) {
1208 		VM_WARN_ON_ONCE(1);
1209 	}
1210 
1211 	/* We've captured and resolved the error. Reset, try again. */
1212 	ret = 0;
1213 
1214 	if (addr != end)
1215 		goto again;
1216 out:
1217 	if (unlikely(prealloc))
1218 		folio_put(prealloc);
1219 	return ret;
1220 }
1221 
1222 static inline int
1223 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1224 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1225 	       unsigned long end)
1226 {
1227 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1228 	struct mm_struct *src_mm = src_vma->vm_mm;
1229 	pmd_t *src_pmd, *dst_pmd;
1230 	unsigned long next;
1231 
1232 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1233 	if (!dst_pmd)
1234 		return -ENOMEM;
1235 	src_pmd = pmd_offset(src_pud, addr);
1236 	do {
1237 		next = pmd_addr_end(addr, end);
1238 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1239 			|| pmd_devmap(*src_pmd)) {
1240 			int err;
1241 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1242 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1243 					    addr, dst_vma, src_vma);
1244 			if (err == -ENOMEM)
1245 				return -ENOMEM;
1246 			if (!err)
1247 				continue;
1248 			/* fall through */
1249 		}
1250 		if (pmd_none_or_clear_bad(src_pmd))
1251 			continue;
1252 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1253 				   addr, next))
1254 			return -ENOMEM;
1255 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1256 	return 0;
1257 }
1258 
1259 static inline int
1260 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1261 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1262 	       unsigned long end)
1263 {
1264 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1265 	struct mm_struct *src_mm = src_vma->vm_mm;
1266 	pud_t *src_pud, *dst_pud;
1267 	unsigned long next;
1268 
1269 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1270 	if (!dst_pud)
1271 		return -ENOMEM;
1272 	src_pud = pud_offset(src_p4d, addr);
1273 	do {
1274 		next = pud_addr_end(addr, end);
1275 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1276 			int err;
1277 
1278 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1279 			err = copy_huge_pud(dst_mm, src_mm,
1280 					    dst_pud, src_pud, addr, src_vma);
1281 			if (err == -ENOMEM)
1282 				return -ENOMEM;
1283 			if (!err)
1284 				continue;
1285 			/* fall through */
1286 		}
1287 		if (pud_none_or_clear_bad(src_pud))
1288 			continue;
1289 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1290 				   addr, next))
1291 			return -ENOMEM;
1292 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1293 	return 0;
1294 }
1295 
1296 static inline int
1297 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1298 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1299 	       unsigned long end)
1300 {
1301 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1302 	p4d_t *src_p4d, *dst_p4d;
1303 	unsigned long next;
1304 
1305 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1306 	if (!dst_p4d)
1307 		return -ENOMEM;
1308 	src_p4d = p4d_offset(src_pgd, addr);
1309 	do {
1310 		next = p4d_addr_end(addr, end);
1311 		if (p4d_none_or_clear_bad(src_p4d))
1312 			continue;
1313 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1314 				   addr, next))
1315 			return -ENOMEM;
1316 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1317 	return 0;
1318 }
1319 
1320 /*
1321  * Return true if the vma needs to copy the pgtable during this fork().  Return
1322  * false when we can speed up fork() by allowing lazy page faults later until
1323  * when the child accesses the memory range.
1324  */
1325 static bool
1326 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1327 {
1328 	/*
1329 	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
1330 	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
1331 	 * contains uffd-wp protection information, that's something we can't
1332 	 * retrieve from page cache, and skip copying will lose those info.
1333 	 */
1334 	if (userfaultfd_wp(dst_vma))
1335 		return true;
1336 
1337 	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
1338 		return true;
1339 
1340 	if (src_vma->anon_vma)
1341 		return true;
1342 
1343 	/*
1344 	 * Don't copy ptes where a page fault will fill them correctly.  Fork
1345 	 * becomes much lighter when there are big shared or private readonly
1346 	 * mappings. The tradeoff is that copy_page_range is more efficient
1347 	 * than faulting.
1348 	 */
1349 	return false;
1350 }
1351 
1352 int
1353 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1354 {
1355 	pgd_t *src_pgd, *dst_pgd;
1356 	unsigned long next;
1357 	unsigned long addr = src_vma->vm_start;
1358 	unsigned long end = src_vma->vm_end;
1359 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1360 	struct mm_struct *src_mm = src_vma->vm_mm;
1361 	struct mmu_notifier_range range;
1362 	bool is_cow;
1363 	int ret;
1364 
1365 	if (!vma_needs_copy(dst_vma, src_vma))
1366 		return 0;
1367 
1368 	if (is_vm_hugetlb_page(src_vma))
1369 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1370 
1371 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1372 		/*
1373 		 * We do not free on error cases below as remove_vma
1374 		 * gets called on error from higher level routine
1375 		 */
1376 		ret = track_pfn_copy(src_vma);
1377 		if (ret)
1378 			return ret;
1379 	}
1380 
1381 	/*
1382 	 * We need to invalidate the secondary MMU mappings only when
1383 	 * there could be a permission downgrade on the ptes of the
1384 	 * parent mm. And a permission downgrade will only happen if
1385 	 * is_cow_mapping() returns true.
1386 	 */
1387 	is_cow = is_cow_mapping(src_vma->vm_flags);
1388 
1389 	if (is_cow) {
1390 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1391 					0, src_mm, addr, end);
1392 		mmu_notifier_invalidate_range_start(&range);
1393 		/*
1394 		 * Disabling preemption is not needed for the write side, as
1395 		 * the read side doesn't spin, but goes to the mmap_lock.
1396 		 *
1397 		 * Use the raw variant of the seqcount_t write API to avoid
1398 		 * lockdep complaining about preemptibility.
1399 		 */
1400 		vma_assert_write_locked(src_vma);
1401 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1402 	}
1403 
1404 	ret = 0;
1405 	dst_pgd = pgd_offset(dst_mm, addr);
1406 	src_pgd = pgd_offset(src_mm, addr);
1407 	do {
1408 		next = pgd_addr_end(addr, end);
1409 		if (pgd_none_or_clear_bad(src_pgd))
1410 			continue;
1411 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1412 					    addr, next))) {
1413 			untrack_pfn_clear(dst_vma);
1414 			ret = -ENOMEM;
1415 			break;
1416 		}
1417 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1418 
1419 	if (is_cow) {
1420 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1421 		mmu_notifier_invalidate_range_end(&range);
1422 	}
1423 	return ret;
1424 }
1425 
1426 /* Whether we should zap all COWed (private) pages too */
1427 static inline bool should_zap_cows(struct zap_details *details)
1428 {
1429 	/* By default, zap all pages */
1430 	if (!details)
1431 		return true;
1432 
1433 	/* Or, we zap COWed pages only if the caller wants to */
1434 	return details->even_cows;
1435 }
1436 
1437 /* Decides whether we should zap this folio with the folio pointer specified */
1438 static inline bool should_zap_folio(struct zap_details *details,
1439 				    struct folio *folio)
1440 {
1441 	/* If we can make a decision without *folio.. */
1442 	if (should_zap_cows(details))
1443 		return true;
1444 
1445 	/* Otherwise we should only zap non-anon folios */
1446 	return !folio_test_anon(folio);
1447 }
1448 
1449 static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
1450 {
1451 	if (!details)
1452 		return false;
1453 
1454 	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1455 }
1456 
1457 /*
1458  * This function makes sure that we'll replace the none pte with an uffd-wp
1459  * swap special pte marker when necessary. Must be with the pgtable lock held.
1460  */
1461 static inline void
1462 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1463 			      unsigned long addr, pte_t *pte, int nr,
1464 			      struct zap_details *details, pte_t pteval)
1465 {
1466 	/* Zap on anonymous always means dropping everything */
1467 	if (vma_is_anonymous(vma))
1468 		return;
1469 
1470 	if (zap_drop_file_uffd_wp(details))
1471 		return;
1472 
1473 	for (;;) {
1474 		/* the PFN in the PTE is irrelevant. */
1475 		pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
1476 		if (--nr == 0)
1477 			break;
1478 		pte++;
1479 		addr += PAGE_SIZE;
1480 	}
1481 }
1482 
1483 static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
1484 		struct vm_area_struct *vma, struct folio *folio,
1485 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
1486 		unsigned long addr, struct zap_details *details, int *rss,
1487 		bool *force_flush, bool *force_break)
1488 {
1489 	struct mm_struct *mm = tlb->mm;
1490 	bool delay_rmap = false;
1491 
1492 	if (!folio_test_anon(folio)) {
1493 		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1494 		if (pte_dirty(ptent)) {
1495 			folio_mark_dirty(folio);
1496 			if (tlb_delay_rmap(tlb)) {
1497 				delay_rmap = true;
1498 				*force_flush = true;
1499 			}
1500 		}
1501 		if (pte_young(ptent) && likely(vma_has_recency(vma)))
1502 			folio_mark_accessed(folio);
1503 		rss[mm_counter(folio)] -= nr;
1504 	} else {
1505 		/* We don't need up-to-date accessed/dirty bits. */
1506 		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1507 		rss[MM_ANONPAGES] -= nr;
1508 	}
1509 	/* Checking a single PTE in a batch is sufficient. */
1510 	arch_check_zapped_pte(vma, ptent);
1511 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
1512 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
1513 		zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details,
1514 					      ptent);
1515 
1516 	if (!delay_rmap) {
1517 		folio_remove_rmap_ptes(folio, page, nr, vma);
1518 
1519 		if (unlikely(folio_mapcount(folio) < 0))
1520 			print_bad_pte(vma, addr, ptent, page);
1521 	}
1522 	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
1523 		*force_flush = true;
1524 		*force_break = true;
1525 	}
1526 }
1527 
1528 /*
1529  * Zap or skip at least one present PTE, trying to batch-process subsequent
1530  * PTEs that map consecutive pages of the same folio.
1531  *
1532  * Returns the number of processed (skipped or zapped) PTEs (at least 1).
1533  */
1534 static inline int zap_present_ptes(struct mmu_gather *tlb,
1535 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
1536 		unsigned int max_nr, unsigned long addr,
1537 		struct zap_details *details, int *rss, bool *force_flush,
1538 		bool *force_break)
1539 {
1540 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
1541 	struct mm_struct *mm = tlb->mm;
1542 	struct folio *folio;
1543 	struct page *page;
1544 	int nr;
1545 
1546 	page = vm_normal_page(vma, addr, ptent);
1547 	if (!page) {
1548 		/* We don't need up-to-date accessed/dirty bits. */
1549 		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
1550 		arch_check_zapped_pte(vma, ptent);
1551 		tlb_remove_tlb_entry(tlb, pte, addr);
1552 		if (userfaultfd_pte_wp(vma, ptent))
1553 			zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
1554 						      details, ptent);
1555 		ksm_might_unmap_zero_page(mm, ptent);
1556 		return 1;
1557 	}
1558 
1559 	folio = page_folio(page);
1560 	if (unlikely(!should_zap_folio(details, folio)))
1561 		return 1;
1562 
1563 	/*
1564 	 * Make sure that the common "small folio" case is as fast as possible
1565 	 * by keeping the batching logic separate.
1566 	 */
1567 	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
1568 		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
1569 				     NULL, NULL, NULL);
1570 
1571 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
1572 				       addr, details, rss, force_flush,
1573 				       force_break);
1574 		return nr;
1575 	}
1576 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
1577 			       details, rss, force_flush, force_break);
1578 	return 1;
1579 }
1580 
1581 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1582 				struct vm_area_struct *vma, pmd_t *pmd,
1583 				unsigned long addr, unsigned long end,
1584 				struct zap_details *details)
1585 {
1586 	bool force_flush = false, force_break = false;
1587 	struct mm_struct *mm = tlb->mm;
1588 	int rss[NR_MM_COUNTERS];
1589 	spinlock_t *ptl;
1590 	pte_t *start_pte;
1591 	pte_t *pte;
1592 	swp_entry_t entry;
1593 	int nr;
1594 
1595 	tlb_change_page_size(tlb, PAGE_SIZE);
1596 	init_rss_vec(rss);
1597 	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1598 	if (!pte)
1599 		return addr;
1600 
1601 	flush_tlb_batched_pending(mm);
1602 	arch_enter_lazy_mmu_mode();
1603 	do {
1604 		pte_t ptent = ptep_get(pte);
1605 		struct folio *folio;
1606 		struct page *page;
1607 		int max_nr;
1608 
1609 		nr = 1;
1610 		if (pte_none(ptent))
1611 			continue;
1612 
1613 		if (need_resched())
1614 			break;
1615 
1616 		if (pte_present(ptent)) {
1617 			max_nr = (end - addr) / PAGE_SIZE;
1618 			nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
1619 					      addr, details, rss, &force_flush,
1620 					      &force_break);
1621 			if (unlikely(force_break)) {
1622 				addr += nr * PAGE_SIZE;
1623 				break;
1624 			}
1625 			continue;
1626 		}
1627 
1628 		entry = pte_to_swp_entry(ptent);
1629 		if (is_device_private_entry(entry) ||
1630 		    is_device_exclusive_entry(entry)) {
1631 			page = pfn_swap_entry_to_page(entry);
1632 			folio = page_folio(page);
1633 			if (unlikely(!should_zap_folio(details, folio)))
1634 				continue;
1635 			/*
1636 			 * Both device private/exclusive mappings should only
1637 			 * work with anonymous page so far, so we don't need to
1638 			 * consider uffd-wp bit when zap. For more information,
1639 			 * see zap_install_uffd_wp_if_needed().
1640 			 */
1641 			WARN_ON_ONCE(!vma_is_anonymous(vma));
1642 			rss[mm_counter(folio)]--;
1643 			if (is_device_private_entry(entry))
1644 				folio_remove_rmap_pte(folio, page, vma);
1645 			folio_put(folio);
1646 		} else if (!non_swap_entry(entry)) {
1647 			max_nr = (end - addr) / PAGE_SIZE;
1648 			nr = swap_pte_batch(pte, max_nr, ptent);
1649 			/* Genuine swap entries, hence a private anon pages */
1650 			if (!should_zap_cows(details))
1651 				continue;
1652 			rss[MM_SWAPENTS] -= nr;
1653 			free_swap_and_cache_nr(entry, nr);
1654 		} else if (is_migration_entry(entry)) {
1655 			folio = pfn_swap_entry_folio(entry);
1656 			if (!should_zap_folio(details, folio))
1657 				continue;
1658 			rss[mm_counter(folio)]--;
1659 		} else if (pte_marker_entry_uffd_wp(entry)) {
1660 			/*
1661 			 * For anon: always drop the marker; for file: only
1662 			 * drop the marker if explicitly requested.
1663 			 */
1664 			if (!vma_is_anonymous(vma) &&
1665 			    !zap_drop_file_uffd_wp(details))
1666 				continue;
1667 		} else if (is_hwpoison_entry(entry) ||
1668 			   is_poisoned_swp_entry(entry)) {
1669 			if (!should_zap_cows(details))
1670 				continue;
1671 		} else {
1672 			/* We should have covered all the swap entry types */
1673 			pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
1674 			WARN_ON_ONCE(1);
1675 		}
1676 		clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
1677 		zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
1678 	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
1679 
1680 	add_mm_rss_vec(mm, rss);
1681 	arch_leave_lazy_mmu_mode();
1682 
1683 	/* Do the actual TLB flush before dropping ptl */
1684 	if (force_flush) {
1685 		tlb_flush_mmu_tlbonly(tlb);
1686 		tlb_flush_rmaps(tlb, vma);
1687 	}
1688 	pte_unmap_unlock(start_pte, ptl);
1689 
1690 	/*
1691 	 * If we forced a TLB flush (either due to running out of
1692 	 * batch buffers or because we needed to flush dirty TLB
1693 	 * entries before releasing the ptl), free the batched
1694 	 * memory too. Come back again if we didn't do everything.
1695 	 */
1696 	if (force_flush)
1697 		tlb_flush_mmu(tlb);
1698 
1699 	return addr;
1700 }
1701 
1702 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1703 				struct vm_area_struct *vma, pud_t *pud,
1704 				unsigned long addr, unsigned long end,
1705 				struct zap_details *details)
1706 {
1707 	pmd_t *pmd;
1708 	unsigned long next;
1709 
1710 	pmd = pmd_offset(pud, addr);
1711 	do {
1712 		next = pmd_addr_end(addr, end);
1713 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1714 			if (next - addr != HPAGE_PMD_SIZE)
1715 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1716 			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1717 				addr = next;
1718 				continue;
1719 			}
1720 			/* fall through */
1721 		} else if (details && details->single_folio &&
1722 			   folio_test_pmd_mappable(details->single_folio) &&
1723 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1724 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1725 			/*
1726 			 * Take and drop THP pmd lock so that we cannot return
1727 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1728 			 * but not yet decremented compound_mapcount().
1729 			 */
1730 			spin_unlock(ptl);
1731 		}
1732 		if (pmd_none(*pmd)) {
1733 			addr = next;
1734 			continue;
1735 		}
1736 		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1737 		if (addr != next)
1738 			pmd--;
1739 	} while (pmd++, cond_resched(), addr != end);
1740 
1741 	return addr;
1742 }
1743 
1744 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1745 				struct vm_area_struct *vma, p4d_t *p4d,
1746 				unsigned long addr, unsigned long end,
1747 				struct zap_details *details)
1748 {
1749 	pud_t *pud;
1750 	unsigned long next;
1751 
1752 	pud = pud_offset(p4d, addr);
1753 	do {
1754 		next = pud_addr_end(addr, end);
1755 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1756 			if (next - addr != HPAGE_PUD_SIZE) {
1757 				mmap_assert_locked(tlb->mm);
1758 				split_huge_pud(vma, pud, addr);
1759 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1760 				goto next;
1761 			/* fall through */
1762 		}
1763 		if (pud_none_or_clear_bad(pud))
1764 			continue;
1765 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1766 next:
1767 		cond_resched();
1768 	} while (pud++, addr = next, addr != end);
1769 
1770 	return addr;
1771 }
1772 
1773 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1774 				struct vm_area_struct *vma, pgd_t *pgd,
1775 				unsigned long addr, unsigned long end,
1776 				struct zap_details *details)
1777 {
1778 	p4d_t *p4d;
1779 	unsigned long next;
1780 
1781 	p4d = p4d_offset(pgd, addr);
1782 	do {
1783 		next = p4d_addr_end(addr, end);
1784 		if (p4d_none_or_clear_bad(p4d))
1785 			continue;
1786 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1787 	} while (p4d++, addr = next, addr != end);
1788 
1789 	return addr;
1790 }
1791 
1792 void unmap_page_range(struct mmu_gather *tlb,
1793 			     struct vm_area_struct *vma,
1794 			     unsigned long addr, unsigned long end,
1795 			     struct zap_details *details)
1796 {
1797 	pgd_t *pgd;
1798 	unsigned long next;
1799 
1800 	BUG_ON(addr >= end);
1801 	tlb_start_vma(tlb, vma);
1802 	pgd = pgd_offset(vma->vm_mm, addr);
1803 	do {
1804 		next = pgd_addr_end(addr, end);
1805 		if (pgd_none_or_clear_bad(pgd))
1806 			continue;
1807 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1808 	} while (pgd++, addr = next, addr != end);
1809 	tlb_end_vma(tlb, vma);
1810 }
1811 
1812 
1813 static void unmap_single_vma(struct mmu_gather *tlb,
1814 		struct vm_area_struct *vma, unsigned long start_addr,
1815 		unsigned long end_addr,
1816 		struct zap_details *details, bool mm_wr_locked)
1817 {
1818 	unsigned long start = max(vma->vm_start, start_addr);
1819 	unsigned long end;
1820 
1821 	if (start >= vma->vm_end)
1822 		return;
1823 	end = min(vma->vm_end, end_addr);
1824 	if (end <= vma->vm_start)
1825 		return;
1826 
1827 	if (vma->vm_file)
1828 		uprobe_munmap(vma, start, end);
1829 
1830 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1831 		untrack_pfn(vma, 0, 0, mm_wr_locked);
1832 
1833 	if (start != end) {
1834 		if (unlikely(is_vm_hugetlb_page(vma))) {
1835 			/*
1836 			 * It is undesirable to test vma->vm_file as it
1837 			 * should be non-null for valid hugetlb area.
1838 			 * However, vm_file will be NULL in the error
1839 			 * cleanup path of mmap_region. When
1840 			 * hugetlbfs ->mmap method fails,
1841 			 * mmap_region() nullifies vma->vm_file
1842 			 * before calling this function to clean up.
1843 			 * Since no pte has actually been setup, it is
1844 			 * safe to do nothing in this case.
1845 			 */
1846 			if (vma->vm_file) {
1847 				zap_flags_t zap_flags = details ?
1848 				    details->zap_flags : 0;
1849 				__unmap_hugepage_range(tlb, vma, start, end,
1850 							     NULL, zap_flags);
1851 			}
1852 		} else
1853 			unmap_page_range(tlb, vma, start, end, details);
1854 	}
1855 }
1856 
1857 /**
1858  * unmap_vmas - unmap a range of memory covered by a list of vma's
1859  * @tlb: address of the caller's struct mmu_gather
1860  * @mas: the maple state
1861  * @vma: the starting vma
1862  * @start_addr: virtual address at which to start unmapping
1863  * @end_addr: virtual address at which to end unmapping
1864  * @tree_end: The maximum index to check
1865  * @mm_wr_locked: lock flag
1866  *
1867  * Unmap all pages in the vma list.
1868  *
1869  * Only addresses between `start' and `end' will be unmapped.
1870  *
1871  * The VMA list must be sorted in ascending virtual address order.
1872  *
1873  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1874  * range after unmap_vmas() returns.  So the only responsibility here is to
1875  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1876  * drops the lock and schedules.
1877  */
1878 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1879 		struct vm_area_struct *vma, unsigned long start_addr,
1880 		unsigned long end_addr, unsigned long tree_end,
1881 		bool mm_wr_locked)
1882 {
1883 	struct mmu_notifier_range range;
1884 	struct zap_details details = {
1885 		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
1886 		/* Careful - we need to zap private pages too! */
1887 		.even_cows = true,
1888 	};
1889 
1890 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1891 				start_addr, end_addr);
1892 	mmu_notifier_invalidate_range_start(&range);
1893 	do {
1894 		unsigned long start = start_addr;
1895 		unsigned long end = end_addr;
1896 		hugetlb_zap_begin(vma, &start, &end);
1897 		unmap_single_vma(tlb, vma, start, end, &details,
1898 				 mm_wr_locked);
1899 		hugetlb_zap_end(vma, &details);
1900 		vma = mas_find(mas, tree_end - 1);
1901 	} while (vma && likely(!xa_is_zero(vma)));
1902 	mmu_notifier_invalidate_range_end(&range);
1903 }
1904 
1905 /**
1906  * zap_page_range_single - remove user pages in a given range
1907  * @vma: vm_area_struct holding the applicable pages
1908  * @address: starting address of pages to zap
1909  * @size: number of bytes to zap
1910  * @details: details of shared cache invalidation
1911  *
1912  * The range must fit into one VMA.
1913  */
1914 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1915 		unsigned long size, struct zap_details *details)
1916 {
1917 	const unsigned long end = address + size;
1918 	struct mmu_notifier_range range;
1919 	struct mmu_gather tlb;
1920 
1921 	lru_add_drain();
1922 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1923 				address, end);
1924 	hugetlb_zap_begin(vma, &range.start, &range.end);
1925 	tlb_gather_mmu(&tlb, vma->vm_mm);
1926 	update_hiwater_rss(vma->vm_mm);
1927 	mmu_notifier_invalidate_range_start(&range);
1928 	/*
1929 	 * unmap 'address-end' not 'range.start-range.end' as range
1930 	 * could have been expanded for hugetlb pmd sharing.
1931 	 */
1932 	unmap_single_vma(&tlb, vma, address, end, details, false);
1933 	mmu_notifier_invalidate_range_end(&range);
1934 	tlb_finish_mmu(&tlb);
1935 	hugetlb_zap_end(vma, details);
1936 }
1937 
1938 /**
1939  * zap_vma_ptes - remove ptes mapping the vma
1940  * @vma: vm_area_struct holding ptes to be zapped
1941  * @address: starting address of pages to zap
1942  * @size: number of bytes to zap
1943  *
1944  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1945  *
1946  * The entire address range must be fully contained within the vma.
1947  *
1948  */
1949 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1950 		unsigned long size)
1951 {
1952 	if (!range_in_vma(vma, address, address + size) ||
1953 	    		!(vma->vm_flags & VM_PFNMAP))
1954 		return;
1955 
1956 	zap_page_range_single(vma, address, size, NULL);
1957 }
1958 EXPORT_SYMBOL_GPL(zap_vma_ptes);
1959 
1960 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1961 {
1962 	pgd_t *pgd;
1963 	p4d_t *p4d;
1964 	pud_t *pud;
1965 	pmd_t *pmd;
1966 
1967 	pgd = pgd_offset(mm, addr);
1968 	p4d = p4d_alloc(mm, pgd, addr);
1969 	if (!p4d)
1970 		return NULL;
1971 	pud = pud_alloc(mm, p4d, addr);
1972 	if (!pud)
1973 		return NULL;
1974 	pmd = pmd_alloc(mm, pud, addr);
1975 	if (!pmd)
1976 		return NULL;
1977 
1978 	VM_BUG_ON(pmd_trans_huge(*pmd));
1979 	return pmd;
1980 }
1981 
1982 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1983 			spinlock_t **ptl)
1984 {
1985 	pmd_t *pmd = walk_to_pmd(mm, addr);
1986 
1987 	if (!pmd)
1988 		return NULL;
1989 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1990 }
1991 
1992 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
1993 {
1994 	VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
1995 	/*
1996 	 * Whoever wants to forbid the zeropage after some zeropages
1997 	 * might already have been mapped has to scan the page tables and
1998 	 * bail out on any zeropages. Zeropages in COW mappings can
1999 	 * be unshared using FAULT_FLAG_UNSHARE faults.
2000 	 */
2001 	if (mm_forbids_zeropage(vma->vm_mm))
2002 		return false;
2003 	/* zeropages in COW mappings are common and unproblematic. */
2004 	if (is_cow_mapping(vma->vm_flags))
2005 		return true;
2006 	/* Mappings that do not allow for writable PTEs are unproblematic. */
2007 	if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
2008 		return true;
2009 	/*
2010 	 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
2011 	 * find the shared zeropage and longterm-pin it, which would
2012 	 * be problematic as soon as the zeropage gets replaced by a different
2013 	 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
2014 	 * now differ to what GUP looked up. FSDAX is incompatible to
2015 	 * FOLL_LONGTERM and VM_IO is incompatible to GUP completely (see
2016 	 * check_vma_flags).
2017 	 */
2018 	return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
2019 	       (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
2020 }
2021 
2022 static int validate_page_before_insert(struct vm_area_struct *vma,
2023 				       struct page *page)
2024 {
2025 	struct folio *folio = page_folio(page);
2026 
2027 	if (!folio_ref_count(folio))
2028 		return -EINVAL;
2029 	if (unlikely(is_zero_folio(folio))) {
2030 		if (!vm_mixed_zeropage_allowed(vma))
2031 			return -EINVAL;
2032 		return 0;
2033 	}
2034 	if (folio_test_anon(folio) || folio_test_slab(folio) ||
2035 	    page_has_type(page))
2036 		return -EINVAL;
2037 	flush_dcache_folio(folio);
2038 	return 0;
2039 }
2040 
2041 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
2042 			unsigned long addr, struct page *page, pgprot_t prot)
2043 {
2044 	struct folio *folio = page_folio(page);
2045 	pte_t pteval;
2046 
2047 	if (!pte_none(ptep_get(pte)))
2048 		return -EBUSY;
2049 	/* Ok, finally just insert the thing.. */
2050 	pteval = mk_pte(page, prot);
2051 	if (unlikely(is_zero_folio(folio))) {
2052 		pteval = pte_mkspecial(pteval);
2053 	} else {
2054 		folio_get(folio);
2055 		inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
2056 		folio_add_file_rmap_pte(folio, page, vma);
2057 	}
2058 	set_pte_at(vma->vm_mm, addr, pte, pteval);
2059 	return 0;
2060 }
2061 
2062 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2063 			struct page *page, pgprot_t prot)
2064 {
2065 	int retval;
2066 	pte_t *pte;
2067 	spinlock_t *ptl;
2068 
2069 	retval = validate_page_before_insert(vma, page);
2070 	if (retval)
2071 		goto out;
2072 	retval = -ENOMEM;
2073 	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
2074 	if (!pte)
2075 		goto out;
2076 	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
2077 	pte_unmap_unlock(pte, ptl);
2078 out:
2079 	return retval;
2080 }
2081 
2082 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
2083 			unsigned long addr, struct page *page, pgprot_t prot)
2084 {
2085 	int err;
2086 
2087 	err = validate_page_before_insert(vma, page);
2088 	if (err)
2089 		return err;
2090 	return insert_page_into_pte_locked(vma, pte, addr, page, prot);
2091 }
2092 
2093 /* insert_pages() amortizes the cost of spinlock operations
2094  * when inserting pages in a loop.
2095  */
2096 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
2097 			struct page **pages, unsigned long *num, pgprot_t prot)
2098 {
2099 	pmd_t *pmd = NULL;
2100 	pte_t *start_pte, *pte;
2101 	spinlock_t *pte_lock;
2102 	struct mm_struct *const mm = vma->vm_mm;
2103 	unsigned long curr_page_idx = 0;
2104 	unsigned long remaining_pages_total = *num;
2105 	unsigned long pages_to_write_in_pmd;
2106 	int ret;
2107 more:
2108 	ret = -EFAULT;
2109 	pmd = walk_to_pmd(mm, addr);
2110 	if (!pmd)
2111 		goto out;
2112 
2113 	pages_to_write_in_pmd = min_t(unsigned long,
2114 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
2115 
2116 	/* Allocate the PTE if necessary; takes PMD lock once only. */
2117 	ret = -ENOMEM;
2118 	if (pte_alloc(mm, pmd))
2119 		goto out;
2120 
2121 	while (pages_to_write_in_pmd) {
2122 		int pte_idx = 0;
2123 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
2124 
2125 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2126 		if (!start_pte) {
2127 			ret = -EFAULT;
2128 			goto out;
2129 		}
2130 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
2131 			int err = insert_page_in_batch_locked(vma, pte,
2132 				addr, pages[curr_page_idx], prot);
2133 			if (unlikely(err)) {
2134 				pte_unmap_unlock(start_pte, pte_lock);
2135 				ret = err;
2136 				remaining_pages_total -= pte_idx;
2137 				goto out;
2138 			}
2139 			addr += PAGE_SIZE;
2140 			++curr_page_idx;
2141 		}
2142 		pte_unmap_unlock(start_pte, pte_lock);
2143 		pages_to_write_in_pmd -= batch_size;
2144 		remaining_pages_total -= batch_size;
2145 	}
2146 	if (remaining_pages_total)
2147 		goto more;
2148 	ret = 0;
2149 out:
2150 	*num = remaining_pages_total;
2151 	return ret;
2152 }
2153 
2154 /**
2155  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2156  * @vma: user vma to map to
2157  * @addr: target start user address of these pages
2158  * @pages: source kernel pages
2159  * @num: in: number of pages to map. out: number of pages that were *not*
2160  * mapped. (0 means all pages were successfully mapped).
2161  *
2162  * Preferred over vm_insert_page() when inserting multiple pages.
2163  *
2164  * In case of error, we may have mapped a subset of the provided
2165  * pages. It is the caller's responsibility to account for this case.
2166  *
2167  * The same restrictions apply as in vm_insert_page().
2168  */
2169 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2170 			struct page **pages, unsigned long *num)
2171 {
2172 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
2173 
2174 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
2175 		return -EFAULT;
2176 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2177 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2178 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2179 		vm_flags_set(vma, VM_MIXEDMAP);
2180 	}
2181 	/* Defer page refcount checking till we're about to map that page. */
2182 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
2183 }
2184 EXPORT_SYMBOL(vm_insert_pages);
2185 
2186 /**
2187  * vm_insert_page - insert single page into user vma
2188  * @vma: user vma to map to
2189  * @addr: target user address of this page
2190  * @page: source kernel page
2191  *
2192  * This allows drivers to insert individual pages they've allocated
2193  * into a user vma. The zeropage is supported in some VMAs,
2194  * see vm_mixed_zeropage_allowed().
2195  *
2196  * The page has to be a nice clean _individual_ kernel allocation.
2197  * If you allocate a compound page, you need to have marked it as
2198  * such (__GFP_COMP), or manually just split the page up yourself
2199  * (see split_page()).
2200  *
2201  * NOTE! Traditionally this was done with "remap_pfn_range()" which
2202  * took an arbitrary page protection parameter. This doesn't allow
2203  * that. Your vma protection will have to be set up correctly, which
2204  * means that if you want a shared writable mapping, you'd better
2205  * ask for a shared writable mapping!
2206  *
2207  * The page does not need to be reserved.
2208  *
2209  * Usually this function is called from f_op->mmap() handler
2210  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2211  * Caller must set VM_MIXEDMAP on vma if it wants to call this
2212  * function from other places, for example from page-fault handler.
2213  *
2214  * Return: %0 on success, negative error code otherwise.
2215  */
2216 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2217 			struct page *page)
2218 {
2219 	if (addr < vma->vm_start || addr >= vma->vm_end)
2220 		return -EFAULT;
2221 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
2222 		BUG_ON(mmap_read_trylock(vma->vm_mm));
2223 		BUG_ON(vma->vm_flags & VM_PFNMAP);
2224 		vm_flags_set(vma, VM_MIXEDMAP);
2225 	}
2226 	return insert_page(vma, addr, page, vma->vm_page_prot);
2227 }
2228 EXPORT_SYMBOL(vm_insert_page);
2229 
2230 /*
2231  * __vm_map_pages - maps range of kernel pages into user vma
2232  * @vma: user vma to map to
2233  * @pages: pointer to array of source kernel pages
2234  * @num: number of pages in page array
2235  * @offset: user's requested vm_pgoff
2236  *
2237  * This allows drivers to map range of kernel pages into a user vma.
2238  * The zeropage is supported in some VMAs, see
2239  * vm_mixed_zeropage_allowed().
2240  *
2241  * Return: 0 on success and error code otherwise.
2242  */
2243 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2244 				unsigned long num, unsigned long offset)
2245 {
2246 	unsigned long count = vma_pages(vma);
2247 	unsigned long uaddr = vma->vm_start;
2248 	int ret, i;
2249 
2250 	/* Fail if the user requested offset is beyond the end of the object */
2251 	if (offset >= num)
2252 		return -ENXIO;
2253 
2254 	/* Fail if the user requested size exceeds available object size */
2255 	if (count > num - offset)
2256 		return -ENXIO;
2257 
2258 	for (i = 0; i < count; i++) {
2259 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2260 		if (ret < 0)
2261 			return ret;
2262 		uaddr += PAGE_SIZE;
2263 	}
2264 
2265 	return 0;
2266 }
2267 
2268 /**
2269  * vm_map_pages - maps range of kernel pages starts with non zero offset
2270  * @vma: user vma to map to
2271  * @pages: pointer to array of source kernel pages
2272  * @num: number of pages in page array
2273  *
2274  * Maps an object consisting of @num pages, catering for the user's
2275  * requested vm_pgoff
2276  *
2277  * If we fail to insert any page into the vma, the function will return
2278  * immediately leaving any previously inserted pages present.  Callers
2279  * from the mmap handler may immediately return the error as their caller
2280  * will destroy the vma, removing any successfully inserted pages. Other
2281  * callers should make their own arrangements for calling unmap_region().
2282  *
2283  * Context: Process context. Called by mmap handlers.
2284  * Return: 0 on success and error code otherwise.
2285  */
2286 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2287 				unsigned long num)
2288 {
2289 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2290 }
2291 EXPORT_SYMBOL(vm_map_pages);
2292 
2293 /**
2294  * vm_map_pages_zero - map range of kernel pages starts with zero offset
2295  * @vma: user vma to map to
2296  * @pages: pointer to array of source kernel pages
2297  * @num: number of pages in page array
2298  *
2299  * Similar to vm_map_pages(), except that it explicitly sets the offset
2300  * to 0. This function is intended for the drivers that did not consider
2301  * vm_pgoff.
2302  *
2303  * Context: Process context. Called by mmap handlers.
2304  * Return: 0 on success and error code otherwise.
2305  */
2306 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2307 				unsigned long num)
2308 {
2309 	return __vm_map_pages(vma, pages, num, 0);
2310 }
2311 EXPORT_SYMBOL(vm_map_pages_zero);
2312 
2313 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2314 			pfn_t pfn, pgprot_t prot, bool mkwrite)
2315 {
2316 	struct mm_struct *mm = vma->vm_mm;
2317 	pte_t *pte, entry;
2318 	spinlock_t *ptl;
2319 
2320 	pte = get_locked_pte(mm, addr, &ptl);
2321 	if (!pte)
2322 		return VM_FAULT_OOM;
2323 	entry = ptep_get(pte);
2324 	if (!pte_none(entry)) {
2325 		if (mkwrite) {
2326 			/*
2327 			 * For read faults on private mappings the PFN passed
2328 			 * in may not match the PFN we have mapped if the
2329 			 * mapped PFN is a writeable COW page.  In the mkwrite
2330 			 * case we are creating a writable PTE for a shared
2331 			 * mapping and we expect the PFNs to match. If they
2332 			 * don't match, we are likely racing with block
2333 			 * allocation and mapping invalidation so just skip the
2334 			 * update.
2335 			 */
2336 			if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2337 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2338 				goto out_unlock;
2339 			}
2340 			entry = pte_mkyoung(entry);
2341 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2342 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2343 				update_mmu_cache(vma, addr, pte);
2344 		}
2345 		goto out_unlock;
2346 	}
2347 
2348 	/* Ok, finally just insert the thing.. */
2349 	if (pfn_t_devmap(pfn))
2350 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2351 	else
2352 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2353 
2354 	if (mkwrite) {
2355 		entry = pte_mkyoung(entry);
2356 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2357 	}
2358 
2359 	set_pte_at(mm, addr, pte, entry);
2360 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2361 
2362 out_unlock:
2363 	pte_unmap_unlock(pte, ptl);
2364 	return VM_FAULT_NOPAGE;
2365 }
2366 
2367 /**
2368  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2369  * @vma: user vma to map to
2370  * @addr: target user address of this page
2371  * @pfn: source kernel pfn
2372  * @pgprot: pgprot flags for the inserted page
2373  *
2374  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2375  * to override pgprot on a per-page basis.
2376  *
2377  * This only makes sense for IO mappings, and it makes no sense for
2378  * COW mappings.  In general, using multiple vmas is preferable;
2379  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2380  * impractical.
2381  *
2382  * pgprot typically only differs from @vma->vm_page_prot when drivers set
2383  * caching- and encryption bits different than those of @vma->vm_page_prot,
2384  * because the caching- or encryption mode may not be known at mmap() time.
2385  *
2386  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2387  * to set caching and encryption bits for those vmas (except for COW pages).
2388  * This is ensured by core vm only modifying these page table entries using
2389  * functions that don't touch caching- or encryption bits, using pte_modify()
2390  * if needed. (See for example mprotect()).
2391  *
2392  * Also when new page-table entries are created, this is only done using the
2393  * fault() callback, and never using the value of vma->vm_page_prot,
2394  * except for page-table entries that point to anonymous pages as the result
2395  * of COW.
2396  *
2397  * Context: Process context.  May allocate using %GFP_KERNEL.
2398  * Return: vm_fault_t value.
2399  */
2400 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2401 			unsigned long pfn, pgprot_t pgprot)
2402 {
2403 	/*
2404 	 * Technically, architectures with pte_special can avoid all these
2405 	 * restrictions (same for remap_pfn_range).  However we would like
2406 	 * consistency in testing and feature parity among all, so we should
2407 	 * try to keep these invariants in place for everybody.
2408 	 */
2409 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2410 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2411 						(VM_PFNMAP|VM_MIXEDMAP));
2412 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2413 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2414 
2415 	if (addr < vma->vm_start || addr >= vma->vm_end)
2416 		return VM_FAULT_SIGBUS;
2417 
2418 	if (!pfn_modify_allowed(pfn, pgprot))
2419 		return VM_FAULT_SIGBUS;
2420 
2421 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2422 
2423 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2424 			false);
2425 }
2426 EXPORT_SYMBOL(vmf_insert_pfn_prot);
2427 
2428 /**
2429  * vmf_insert_pfn - insert single pfn into user vma
2430  * @vma: user vma to map to
2431  * @addr: target user address of this page
2432  * @pfn: source kernel pfn
2433  *
2434  * Similar to vm_insert_page, this allows drivers to insert individual pages
2435  * they've allocated into a user vma. Same comments apply.
2436  *
2437  * This function should only be called from a vm_ops->fault handler, and
2438  * in that case the handler should return the result of this function.
2439  *
2440  * vma cannot be a COW mapping.
2441  *
2442  * As this is called only for pages that do not currently exist, we
2443  * do not need to flush old virtual caches or the TLB.
2444  *
2445  * Context: Process context.  May allocate using %GFP_KERNEL.
2446  * Return: vm_fault_t value.
2447  */
2448 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2449 			unsigned long pfn)
2450 {
2451 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2452 }
2453 EXPORT_SYMBOL(vmf_insert_pfn);
2454 
2455 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
2456 {
2457 	if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
2458 	    (mkwrite || !vm_mixed_zeropage_allowed(vma)))
2459 		return false;
2460 	/* these checks mirror the abort conditions in vm_normal_page */
2461 	if (vma->vm_flags & VM_MIXEDMAP)
2462 		return true;
2463 	if (pfn_t_devmap(pfn))
2464 		return true;
2465 	if (pfn_t_special(pfn))
2466 		return true;
2467 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2468 		return true;
2469 	return false;
2470 }
2471 
2472 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2473 		unsigned long addr, pfn_t pfn, bool mkwrite)
2474 {
2475 	pgprot_t pgprot = vma->vm_page_prot;
2476 	int err;
2477 
2478 	if (!vm_mixed_ok(vma, pfn, mkwrite))
2479 		return VM_FAULT_SIGBUS;
2480 
2481 	if (addr < vma->vm_start || addr >= vma->vm_end)
2482 		return VM_FAULT_SIGBUS;
2483 
2484 	track_pfn_insert(vma, &pgprot, pfn);
2485 
2486 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2487 		return VM_FAULT_SIGBUS;
2488 
2489 	/*
2490 	 * If we don't have pte special, then we have to use the pfn_valid()
2491 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2492 	 * refcount the page if pfn_valid is true (hence insert_page rather
2493 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2494 	 * without pte special, it would there be refcounted as a normal page.
2495 	 */
2496 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2497 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2498 		struct page *page;
2499 
2500 		/*
2501 		 * At this point we are committed to insert_page()
2502 		 * regardless of whether the caller specified flags that
2503 		 * result in pfn_t_has_page() == false.
2504 		 */
2505 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2506 		err = insert_page(vma, addr, page, pgprot);
2507 	} else {
2508 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2509 	}
2510 
2511 	if (err == -ENOMEM)
2512 		return VM_FAULT_OOM;
2513 	if (err < 0 && err != -EBUSY)
2514 		return VM_FAULT_SIGBUS;
2515 
2516 	return VM_FAULT_NOPAGE;
2517 }
2518 
2519 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2520 		pfn_t pfn)
2521 {
2522 	return __vm_insert_mixed(vma, addr, pfn, false);
2523 }
2524 EXPORT_SYMBOL(vmf_insert_mixed);
2525 
2526 /*
2527  *  If the insertion of PTE failed because someone else already added a
2528  *  different entry in the mean time, we treat that as success as we assume
2529  *  the same entry was actually inserted.
2530  */
2531 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2532 		unsigned long addr, pfn_t pfn)
2533 {
2534 	return __vm_insert_mixed(vma, addr, pfn, true);
2535 }
2536 
2537 /*
2538  * maps a range of physical memory into the requested pages. the old
2539  * mappings are removed. any references to nonexistent pages results
2540  * in null mappings (currently treated as "copy-on-access")
2541  */
2542 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2543 			unsigned long addr, unsigned long end,
2544 			unsigned long pfn, pgprot_t prot)
2545 {
2546 	pte_t *pte, *mapped_pte;
2547 	spinlock_t *ptl;
2548 	int err = 0;
2549 
2550 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2551 	if (!pte)
2552 		return -ENOMEM;
2553 	arch_enter_lazy_mmu_mode();
2554 	do {
2555 		BUG_ON(!pte_none(ptep_get(pte)));
2556 		if (!pfn_modify_allowed(pfn, prot)) {
2557 			err = -EACCES;
2558 			break;
2559 		}
2560 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2561 		pfn++;
2562 	} while (pte++, addr += PAGE_SIZE, addr != end);
2563 	arch_leave_lazy_mmu_mode();
2564 	pte_unmap_unlock(mapped_pte, ptl);
2565 	return err;
2566 }
2567 
2568 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2569 			unsigned long addr, unsigned long end,
2570 			unsigned long pfn, pgprot_t prot)
2571 {
2572 	pmd_t *pmd;
2573 	unsigned long next;
2574 	int err;
2575 
2576 	pfn -= addr >> PAGE_SHIFT;
2577 	pmd = pmd_alloc(mm, pud, addr);
2578 	if (!pmd)
2579 		return -ENOMEM;
2580 	VM_BUG_ON(pmd_trans_huge(*pmd));
2581 	do {
2582 		next = pmd_addr_end(addr, end);
2583 		err = remap_pte_range(mm, pmd, addr, next,
2584 				pfn + (addr >> PAGE_SHIFT), prot);
2585 		if (err)
2586 			return err;
2587 	} while (pmd++, addr = next, addr != end);
2588 	return 0;
2589 }
2590 
2591 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2592 			unsigned long addr, unsigned long end,
2593 			unsigned long pfn, pgprot_t prot)
2594 {
2595 	pud_t *pud;
2596 	unsigned long next;
2597 	int err;
2598 
2599 	pfn -= addr >> PAGE_SHIFT;
2600 	pud = pud_alloc(mm, p4d, addr);
2601 	if (!pud)
2602 		return -ENOMEM;
2603 	do {
2604 		next = pud_addr_end(addr, end);
2605 		err = remap_pmd_range(mm, pud, addr, next,
2606 				pfn + (addr >> PAGE_SHIFT), prot);
2607 		if (err)
2608 			return err;
2609 	} while (pud++, addr = next, addr != end);
2610 	return 0;
2611 }
2612 
2613 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2614 			unsigned long addr, unsigned long end,
2615 			unsigned long pfn, pgprot_t prot)
2616 {
2617 	p4d_t *p4d;
2618 	unsigned long next;
2619 	int err;
2620 
2621 	pfn -= addr >> PAGE_SHIFT;
2622 	p4d = p4d_alloc(mm, pgd, addr);
2623 	if (!p4d)
2624 		return -ENOMEM;
2625 	do {
2626 		next = p4d_addr_end(addr, end);
2627 		err = remap_pud_range(mm, p4d, addr, next,
2628 				pfn + (addr >> PAGE_SHIFT), prot);
2629 		if (err)
2630 			return err;
2631 	} while (p4d++, addr = next, addr != end);
2632 	return 0;
2633 }
2634 
2635 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
2636 		unsigned long pfn, unsigned long size, pgprot_t prot)
2637 {
2638 	pgd_t *pgd;
2639 	unsigned long next;
2640 	unsigned long end = addr + PAGE_ALIGN(size);
2641 	struct mm_struct *mm = vma->vm_mm;
2642 	int err;
2643 
2644 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2645 		return -EINVAL;
2646 
2647 	/*
2648 	 * Physically remapped pages are special. Tell the
2649 	 * rest of the world about it:
2650 	 *   VM_IO tells people not to look at these pages
2651 	 *	(accesses can have side effects).
2652 	 *   VM_PFNMAP tells the core MM that the base pages are just
2653 	 *	raw PFN mappings, and do not have a "struct page" associated
2654 	 *	with them.
2655 	 *   VM_DONTEXPAND
2656 	 *      Disable vma merging and expanding with mremap().
2657 	 *   VM_DONTDUMP
2658 	 *      Omit vma from core dump, even when VM_IO turned off.
2659 	 *
2660 	 * There's a horrible special case to handle copy-on-write
2661 	 * behaviour that some programs depend on. We mark the "original"
2662 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2663 	 * See vm_normal_page() for details.
2664 	 */
2665 	if (is_cow_mapping(vma->vm_flags)) {
2666 		if (addr != vma->vm_start || end != vma->vm_end)
2667 			return -EINVAL;
2668 		vma->vm_pgoff = pfn;
2669 	}
2670 
2671 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
2672 
2673 	BUG_ON(addr >= end);
2674 	pfn -= addr >> PAGE_SHIFT;
2675 	pgd = pgd_offset(mm, addr);
2676 	flush_cache_range(vma, addr, end);
2677 	do {
2678 		next = pgd_addr_end(addr, end);
2679 		err = remap_p4d_range(mm, pgd, addr, next,
2680 				pfn + (addr >> PAGE_SHIFT), prot);
2681 		if (err)
2682 			return err;
2683 	} while (pgd++, addr = next, addr != end);
2684 
2685 	return 0;
2686 }
2687 
2688 /*
2689  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2690  * must have pre-validated the caching bits of the pgprot_t.
2691  */
2692 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2693 		unsigned long pfn, unsigned long size, pgprot_t prot)
2694 {
2695 	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
2696 
2697 	if (!error)
2698 		return 0;
2699 
2700 	/*
2701 	 * A partial pfn range mapping is dangerous: it does not
2702 	 * maintain page reference counts, and callers may free
2703 	 * pages due to the error. So zap it early.
2704 	 */
2705 	zap_page_range_single(vma, addr, size, NULL);
2706 	return error;
2707 }
2708 
2709 /**
2710  * remap_pfn_range - remap kernel memory to userspace
2711  * @vma: user vma to map to
2712  * @addr: target page aligned user address to start at
2713  * @pfn: page frame number of kernel physical memory address
2714  * @size: size of mapping area
2715  * @prot: page protection flags for this mapping
2716  *
2717  * Note: this is only safe if the mm semaphore is held when called.
2718  *
2719  * Return: %0 on success, negative error code otherwise.
2720  */
2721 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2722 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2723 {
2724 	int err;
2725 
2726 	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2727 	if (err)
2728 		return -EINVAL;
2729 
2730 	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2731 	if (err)
2732 		untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
2733 	return err;
2734 }
2735 EXPORT_SYMBOL(remap_pfn_range);
2736 
2737 /**
2738  * vm_iomap_memory - remap memory to userspace
2739  * @vma: user vma to map to
2740  * @start: start of the physical memory to be mapped
2741  * @len: size of area
2742  *
2743  * This is a simplified io_remap_pfn_range() for common driver use. The
2744  * driver just needs to give us the physical memory range to be mapped,
2745  * we'll figure out the rest from the vma information.
2746  *
2747  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2748  * whatever write-combining details or similar.
2749  *
2750  * Return: %0 on success, negative error code otherwise.
2751  */
2752 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2753 {
2754 	unsigned long vm_len, pfn, pages;
2755 
2756 	/* Check that the physical memory area passed in looks valid */
2757 	if (start + len < start)
2758 		return -EINVAL;
2759 	/*
2760 	 * You *really* shouldn't map things that aren't page-aligned,
2761 	 * but we've historically allowed it because IO memory might
2762 	 * just have smaller alignment.
2763 	 */
2764 	len += start & ~PAGE_MASK;
2765 	pfn = start >> PAGE_SHIFT;
2766 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2767 	if (pfn + pages < pfn)
2768 		return -EINVAL;
2769 
2770 	/* We start the mapping 'vm_pgoff' pages into the area */
2771 	if (vma->vm_pgoff > pages)
2772 		return -EINVAL;
2773 	pfn += vma->vm_pgoff;
2774 	pages -= vma->vm_pgoff;
2775 
2776 	/* Can we fit all of the mapping? */
2777 	vm_len = vma->vm_end - vma->vm_start;
2778 	if (vm_len >> PAGE_SHIFT > pages)
2779 		return -EINVAL;
2780 
2781 	/* Ok, let it rip */
2782 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2783 }
2784 EXPORT_SYMBOL(vm_iomap_memory);
2785 
2786 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2787 				     unsigned long addr, unsigned long end,
2788 				     pte_fn_t fn, void *data, bool create,
2789 				     pgtbl_mod_mask *mask)
2790 {
2791 	pte_t *pte, *mapped_pte;
2792 	int err = 0;
2793 	spinlock_t *ptl;
2794 
2795 	if (create) {
2796 		mapped_pte = pte = (mm == &init_mm) ?
2797 			pte_alloc_kernel_track(pmd, addr, mask) :
2798 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2799 		if (!pte)
2800 			return -ENOMEM;
2801 	} else {
2802 		mapped_pte = pte = (mm == &init_mm) ?
2803 			pte_offset_kernel(pmd, addr) :
2804 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2805 		if (!pte)
2806 			return -EINVAL;
2807 	}
2808 
2809 	arch_enter_lazy_mmu_mode();
2810 
2811 	if (fn) {
2812 		do {
2813 			if (create || !pte_none(ptep_get(pte))) {
2814 				err = fn(pte++, addr, data);
2815 				if (err)
2816 					break;
2817 			}
2818 		} while (addr += PAGE_SIZE, addr != end);
2819 	}
2820 	*mask |= PGTBL_PTE_MODIFIED;
2821 
2822 	arch_leave_lazy_mmu_mode();
2823 
2824 	if (mm != &init_mm)
2825 		pte_unmap_unlock(mapped_pte, ptl);
2826 	return err;
2827 }
2828 
2829 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2830 				     unsigned long addr, unsigned long end,
2831 				     pte_fn_t fn, void *data, bool create,
2832 				     pgtbl_mod_mask *mask)
2833 {
2834 	pmd_t *pmd;
2835 	unsigned long next;
2836 	int err = 0;
2837 
2838 	BUG_ON(pud_leaf(*pud));
2839 
2840 	if (create) {
2841 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2842 		if (!pmd)
2843 			return -ENOMEM;
2844 	} else {
2845 		pmd = pmd_offset(pud, addr);
2846 	}
2847 	do {
2848 		next = pmd_addr_end(addr, end);
2849 		if (pmd_none(*pmd) && !create)
2850 			continue;
2851 		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2852 			return -EINVAL;
2853 		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2854 			if (!create)
2855 				continue;
2856 			pmd_clear_bad(pmd);
2857 		}
2858 		err = apply_to_pte_range(mm, pmd, addr, next,
2859 					 fn, data, create, mask);
2860 		if (err)
2861 			break;
2862 	} while (pmd++, addr = next, addr != end);
2863 
2864 	return err;
2865 }
2866 
2867 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2868 				     unsigned long addr, unsigned long end,
2869 				     pte_fn_t fn, void *data, bool create,
2870 				     pgtbl_mod_mask *mask)
2871 {
2872 	pud_t *pud;
2873 	unsigned long next;
2874 	int err = 0;
2875 
2876 	if (create) {
2877 		pud = pud_alloc_track(mm, p4d, addr, mask);
2878 		if (!pud)
2879 			return -ENOMEM;
2880 	} else {
2881 		pud = pud_offset(p4d, addr);
2882 	}
2883 	do {
2884 		next = pud_addr_end(addr, end);
2885 		if (pud_none(*pud) && !create)
2886 			continue;
2887 		if (WARN_ON_ONCE(pud_leaf(*pud)))
2888 			return -EINVAL;
2889 		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2890 			if (!create)
2891 				continue;
2892 			pud_clear_bad(pud);
2893 		}
2894 		err = apply_to_pmd_range(mm, pud, addr, next,
2895 					 fn, data, create, mask);
2896 		if (err)
2897 			break;
2898 	} while (pud++, addr = next, addr != end);
2899 
2900 	return err;
2901 }
2902 
2903 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2904 				     unsigned long addr, unsigned long end,
2905 				     pte_fn_t fn, void *data, bool create,
2906 				     pgtbl_mod_mask *mask)
2907 {
2908 	p4d_t *p4d;
2909 	unsigned long next;
2910 	int err = 0;
2911 
2912 	if (create) {
2913 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2914 		if (!p4d)
2915 			return -ENOMEM;
2916 	} else {
2917 		p4d = p4d_offset(pgd, addr);
2918 	}
2919 	do {
2920 		next = p4d_addr_end(addr, end);
2921 		if (p4d_none(*p4d) && !create)
2922 			continue;
2923 		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2924 			return -EINVAL;
2925 		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2926 			if (!create)
2927 				continue;
2928 			p4d_clear_bad(p4d);
2929 		}
2930 		err = apply_to_pud_range(mm, p4d, addr, next,
2931 					 fn, data, create, mask);
2932 		if (err)
2933 			break;
2934 	} while (p4d++, addr = next, addr != end);
2935 
2936 	return err;
2937 }
2938 
2939 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2940 				 unsigned long size, pte_fn_t fn,
2941 				 void *data, bool create)
2942 {
2943 	pgd_t *pgd;
2944 	unsigned long start = addr, next;
2945 	unsigned long end = addr + size;
2946 	pgtbl_mod_mask mask = 0;
2947 	int err = 0;
2948 
2949 	if (WARN_ON(addr >= end))
2950 		return -EINVAL;
2951 
2952 	pgd = pgd_offset(mm, addr);
2953 	do {
2954 		next = pgd_addr_end(addr, end);
2955 		if (pgd_none(*pgd) && !create)
2956 			continue;
2957 		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2958 			return -EINVAL;
2959 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2960 			if (!create)
2961 				continue;
2962 			pgd_clear_bad(pgd);
2963 		}
2964 		err = apply_to_p4d_range(mm, pgd, addr, next,
2965 					 fn, data, create, &mask);
2966 		if (err)
2967 			break;
2968 	} while (pgd++, addr = next, addr != end);
2969 
2970 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2971 		arch_sync_kernel_mappings(start, start + size);
2972 
2973 	return err;
2974 }
2975 
2976 /*
2977  * Scan a region of virtual memory, filling in page tables as necessary
2978  * and calling a provided function on each leaf page table.
2979  */
2980 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2981 			unsigned long size, pte_fn_t fn, void *data)
2982 {
2983 	return __apply_to_page_range(mm, addr, size, fn, data, true);
2984 }
2985 EXPORT_SYMBOL_GPL(apply_to_page_range);
2986 
2987 /*
2988  * Scan a region of virtual memory, calling a provided function on
2989  * each leaf page table where it exists.
2990  *
2991  * Unlike apply_to_page_range, this does _not_ fill in page tables
2992  * where they are absent.
2993  */
2994 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2995 				 unsigned long size, pte_fn_t fn, void *data)
2996 {
2997 	return __apply_to_page_range(mm, addr, size, fn, data, false);
2998 }
2999 EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
3000 
3001 /*
3002  * handle_pte_fault chooses page fault handler according to an entry which was
3003  * read non-atomically.  Before making any commitment, on those architectures
3004  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3005  * parts, do_swap_page must check under lock before unmapping the pte and
3006  * proceeding (but do_wp_page is only called after already making such a check;
3007  * and do_anonymous_page can safely check later on).
3008  */
3009 static inline int pte_unmap_same(struct vm_fault *vmf)
3010 {
3011 	int same = 1;
3012 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
3013 	if (sizeof(pte_t) > sizeof(unsigned long)) {
3014 		spin_lock(vmf->ptl);
3015 		same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
3016 		spin_unlock(vmf->ptl);
3017 	}
3018 #endif
3019 	pte_unmap(vmf->pte);
3020 	vmf->pte = NULL;
3021 	return same;
3022 }
3023 
3024 /*
3025  * Return:
3026  *	0:		copied succeeded
3027  *	-EHWPOISON:	copy failed due to hwpoison in source page
3028  *	-EAGAIN:	copied failed (some other reason)
3029  */
3030 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
3031 				      struct vm_fault *vmf)
3032 {
3033 	int ret;
3034 	void *kaddr;
3035 	void __user *uaddr;
3036 	struct vm_area_struct *vma = vmf->vma;
3037 	struct mm_struct *mm = vma->vm_mm;
3038 	unsigned long addr = vmf->address;
3039 
3040 	if (likely(src)) {
3041 		if (copy_mc_user_highpage(dst, src, addr, vma))
3042 			return -EHWPOISON;
3043 		return 0;
3044 	}
3045 
3046 	/*
3047 	 * If the source page was a PFN mapping, we don't have
3048 	 * a "struct page" for it. We do a best-effort copy by
3049 	 * just copying from the original user address. If that
3050 	 * fails, we just zero-fill it. Live with it.
3051 	 */
3052 	kaddr = kmap_local_page(dst);
3053 	pagefault_disable();
3054 	uaddr = (void __user *)(addr & PAGE_MASK);
3055 
3056 	/*
3057 	 * On architectures with software "accessed" bits, we would
3058 	 * take a double page fault, so mark it accessed here.
3059 	 */
3060 	vmf->pte = NULL;
3061 	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
3062 		pte_t entry;
3063 
3064 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3065 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3066 			/*
3067 			 * Other thread has already handled the fault
3068 			 * and update local tlb only
3069 			 */
3070 			if (vmf->pte)
3071 				update_mmu_tlb(vma, addr, vmf->pte);
3072 			ret = -EAGAIN;
3073 			goto pte_unlock;
3074 		}
3075 
3076 		entry = pte_mkyoung(vmf->orig_pte);
3077 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3078 			update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3079 	}
3080 
3081 	/*
3082 	 * This really shouldn't fail, because the page is there
3083 	 * in the page tables. But it might just be unreadable,
3084 	 * in which case we just give up and fill the result with
3085 	 * zeroes.
3086 	 */
3087 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3088 		if (vmf->pte)
3089 			goto warn;
3090 
3091 		/* Re-validate under PTL if the page is still mapped */
3092 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3093 		if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3094 			/* The PTE changed under us, update local tlb */
3095 			if (vmf->pte)
3096 				update_mmu_tlb(vma, addr, vmf->pte);
3097 			ret = -EAGAIN;
3098 			goto pte_unlock;
3099 		}
3100 
3101 		/*
3102 		 * The same page can be mapped back since last copy attempt.
3103 		 * Try to copy again under PTL.
3104 		 */
3105 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3106 			/*
3107 			 * Give a warn in case there can be some obscure
3108 			 * use-case
3109 			 */
3110 warn:
3111 			WARN_ON_ONCE(1);
3112 			clear_page(kaddr);
3113 		}
3114 	}
3115 
3116 	ret = 0;
3117 
3118 pte_unlock:
3119 	if (vmf->pte)
3120 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3121 	pagefault_enable();
3122 	kunmap_local(kaddr);
3123 	flush_dcache_page(dst);
3124 
3125 	return ret;
3126 }
3127 
3128 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3129 {
3130 	struct file *vm_file = vma->vm_file;
3131 
3132 	if (vm_file)
3133 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3134 
3135 	/*
3136 	 * Special mappings (e.g. VDSO) do not have any file so fake
3137 	 * a default GFP_KERNEL for them.
3138 	 */
3139 	return GFP_KERNEL;
3140 }
3141 
3142 /*
3143  * Notify the address space that the page is about to become writable so that
3144  * it can prohibit this or wait for the page to get into an appropriate state.
3145  *
3146  * We do this without the lock held, so that it can sleep if it needs to.
3147  */
3148 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3149 {
3150 	vm_fault_t ret;
3151 	unsigned int old_flags = vmf->flags;
3152 
3153 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3154 
3155 	if (vmf->vma->vm_file &&
3156 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3157 		return VM_FAULT_SIGBUS;
3158 
3159 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3160 	/* Restore original flags so that caller is not surprised */
3161 	vmf->flags = old_flags;
3162 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3163 		return ret;
3164 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3165 		folio_lock(folio);
3166 		if (!folio->mapping) {
3167 			folio_unlock(folio);
3168 			return 0; /* retry */
3169 		}
3170 		ret |= VM_FAULT_LOCKED;
3171 	} else
3172 		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3173 	return ret;
3174 }
3175 
3176 /*
3177  * Handle dirtying of a page in shared file mapping on a write fault.
3178  *
3179  * The function expects the page to be locked and unlocks it.
3180  */
3181 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3182 {
3183 	struct vm_area_struct *vma = vmf->vma;
3184 	struct address_space *mapping;
3185 	struct folio *folio = page_folio(vmf->page);
3186 	bool dirtied;
3187 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3188 
3189 	dirtied = folio_mark_dirty(folio);
3190 	VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3191 	/*
3192 	 * Take a local copy of the address_space - folio.mapping may be zeroed
3193 	 * by truncate after folio_unlock().   The address_space itself remains
3194 	 * pinned by vma->vm_file's reference.  We rely on folio_unlock()'s
3195 	 * release semantics to prevent the compiler from undoing this copying.
3196 	 */
3197 	mapping = folio_raw_mapping(folio);
3198 	folio_unlock(folio);
3199 
3200 	if (!page_mkwrite)
3201 		file_update_time(vma->vm_file);
3202 
3203 	/*
3204 	 * Throttle page dirtying rate down to writeback speed.
3205 	 *
3206 	 * mapping may be NULL here because some device drivers do not
3207 	 * set page.mapping but still dirty their pages
3208 	 *
3209 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3210 	 * is pinning the mapping, as per above.
3211 	 */
3212 	if ((dirtied || page_mkwrite) && mapping) {
3213 		struct file *fpin;
3214 
3215 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3216 		balance_dirty_pages_ratelimited(mapping);
3217 		if (fpin) {
3218 			fput(fpin);
3219 			return VM_FAULT_COMPLETED;
3220 		}
3221 	}
3222 
3223 	return 0;
3224 }
3225 
3226 /*
3227  * Handle write page faults for pages that can be reused in the current vma
3228  *
3229  * This can happen either due to the mapping being with the VM_SHARED flag,
3230  * or due to us being the last reference standing to the page. In either
3231  * case, all we need to do here is to mark the page as writable and update
3232  * any related book-keeping.
3233  */
3234 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3235 	__releases(vmf->ptl)
3236 {
3237 	struct vm_area_struct *vma = vmf->vma;
3238 	pte_t entry;
3239 
3240 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3241 	VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
3242 
3243 	if (folio) {
3244 		VM_BUG_ON(folio_test_anon(folio) &&
3245 			  !PageAnonExclusive(vmf->page));
3246 		/*
3247 		 * Clear the folio's cpupid information as the existing
3248 		 * information potentially belongs to a now completely
3249 		 * unrelated process.
3250 		 */
3251 		folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3252 	}
3253 
3254 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3255 	entry = pte_mkyoung(vmf->orig_pte);
3256 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3257 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3258 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3259 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3260 	count_vm_event(PGREUSE);
3261 }
3262 
3263 /*
3264  * We could add a bitflag somewhere, but for now, we know that all
3265  * vm_ops that have a ->map_pages have been audited and don't need
3266  * the mmap_lock to be held.
3267  */
3268 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3269 {
3270 	struct vm_area_struct *vma = vmf->vma;
3271 
3272 	if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3273 		return 0;
3274 	vma_end_read(vma);
3275 	return VM_FAULT_RETRY;
3276 }
3277 
3278 /**
3279  * __vmf_anon_prepare - Prepare to handle an anonymous fault.
3280  * @vmf: The vm_fault descriptor passed from the fault handler.
3281  *
3282  * When preparing to insert an anonymous page into a VMA from a
3283  * fault handler, call this function rather than anon_vma_prepare().
3284  * If this vma does not already have an associated anon_vma and we are
3285  * only protected by the per-VMA lock, the caller must retry with the
3286  * mmap_lock held.  __anon_vma_prepare() will look at adjacent VMAs to
3287  * determine if this VMA can share its anon_vma, and that's not safe to
3288  * do with only the per-VMA lock held for this VMA.
3289  *
3290  * Return: 0 if fault handling can proceed.  Any other value should be
3291  * returned to the caller.
3292  */
3293 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
3294 {
3295 	struct vm_area_struct *vma = vmf->vma;
3296 	vm_fault_t ret = 0;
3297 
3298 	if (likely(vma->anon_vma))
3299 		return 0;
3300 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3301 		if (!mmap_read_trylock(vma->vm_mm))
3302 			return VM_FAULT_RETRY;
3303 	}
3304 	if (__anon_vma_prepare(vma))
3305 		ret = VM_FAULT_OOM;
3306 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3307 		mmap_read_unlock(vma->vm_mm);
3308 	return ret;
3309 }
3310 
3311 /*
3312  * Handle the case of a page which we actually need to copy to a new page,
3313  * either due to COW or unsharing.
3314  *
3315  * Called with mmap_lock locked and the old page referenced, but
3316  * without the ptl held.
3317  *
3318  * High level logic flow:
3319  *
3320  * - Allocate a page, copy the content of the old page to the new one.
3321  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3322  * - Take the PTL. If the pte changed, bail out and release the allocated page
3323  * - If the pte is still the way we remember it, update the page table and all
3324  *   relevant references. This includes dropping the reference the page-table
3325  *   held to the old page, as well as updating the rmap.
3326  * - In any case, unlock the PTL and drop the reference we took to the old page.
3327  */
3328 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3329 {
3330 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3331 	struct vm_area_struct *vma = vmf->vma;
3332 	struct mm_struct *mm = vma->vm_mm;
3333 	struct folio *old_folio = NULL;
3334 	struct folio *new_folio = NULL;
3335 	pte_t entry;
3336 	int page_copied = 0;
3337 	struct mmu_notifier_range range;
3338 	vm_fault_t ret;
3339 	bool pfn_is_zero;
3340 
3341 	delayacct_wpcopy_start();
3342 
3343 	if (vmf->page)
3344 		old_folio = page_folio(vmf->page);
3345 	ret = vmf_anon_prepare(vmf);
3346 	if (unlikely(ret))
3347 		goto out;
3348 
3349 	pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3350 	new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3351 	if (!new_folio)
3352 		goto oom;
3353 
3354 	if (!pfn_is_zero) {
3355 		int err;
3356 
3357 		err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3358 		if (err) {
3359 			/*
3360 			 * COW failed, if the fault was solved by other,
3361 			 * it's fine. If not, userspace would re-fault on
3362 			 * the same address and we will handle the fault
3363 			 * from the second attempt.
3364 			 * The -EHWPOISON case will not be retried.
3365 			 */
3366 			folio_put(new_folio);
3367 			if (old_folio)
3368 				folio_put(old_folio);
3369 
3370 			delayacct_wpcopy_end();
3371 			return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3372 		}
3373 		kmsan_copy_page_meta(&new_folio->page, vmf->page);
3374 	}
3375 
3376 	__folio_mark_uptodate(new_folio);
3377 
3378 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3379 				vmf->address & PAGE_MASK,
3380 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3381 	mmu_notifier_invalidate_range_start(&range);
3382 
3383 	/*
3384 	 * Re-check the pte - we dropped the lock
3385 	 */
3386 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3387 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3388 		if (old_folio) {
3389 			if (!folio_test_anon(old_folio)) {
3390 				dec_mm_counter(mm, mm_counter_file(old_folio));
3391 				inc_mm_counter(mm, MM_ANONPAGES);
3392 			}
3393 		} else {
3394 			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3395 			inc_mm_counter(mm, MM_ANONPAGES);
3396 		}
3397 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3398 		entry = mk_pte(&new_folio->page, vma->vm_page_prot);
3399 		entry = pte_sw_mkyoung(entry);
3400 		if (unlikely(unshare)) {
3401 			if (pte_soft_dirty(vmf->orig_pte))
3402 				entry = pte_mksoft_dirty(entry);
3403 			if (pte_uffd_wp(vmf->orig_pte))
3404 				entry = pte_mkuffd_wp(entry);
3405 		} else {
3406 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3407 		}
3408 
3409 		/*
3410 		 * Clear the pte entry and flush it first, before updating the
3411 		 * pte with the new entry, to keep TLBs on different CPUs in
3412 		 * sync. This code used to set the new PTE then flush TLBs, but
3413 		 * that left a window where the new PTE could be loaded into
3414 		 * some TLBs while the old PTE remains in others.
3415 		 */
3416 		ptep_clear_flush(vma, vmf->address, vmf->pte);
3417 		folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
3418 		folio_add_lru_vma(new_folio, vma);
3419 		BUG_ON(unshare && pte_write(entry));
3420 		set_pte_at(mm, vmf->address, vmf->pte, entry);
3421 		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3422 		if (old_folio) {
3423 			/*
3424 			 * Only after switching the pte to the new page may
3425 			 * we remove the mapcount here. Otherwise another
3426 			 * process may come and find the rmap count decremented
3427 			 * before the pte is switched to the new page, and
3428 			 * "reuse" the old page writing into it while our pte
3429 			 * here still points into it and can be read by other
3430 			 * threads.
3431 			 *
3432 			 * The critical issue is to order this
3433 			 * folio_remove_rmap_pte() with the ptp_clear_flush
3434 			 * above. Those stores are ordered by (if nothing else,)
3435 			 * the barrier present in the atomic_add_negative
3436 			 * in folio_remove_rmap_pte();
3437 			 *
3438 			 * Then the TLB flush in ptep_clear_flush ensures that
3439 			 * no process can access the old page before the
3440 			 * decremented mapcount is visible. And the old page
3441 			 * cannot be reused until after the decremented
3442 			 * mapcount is visible. So transitively, TLBs to
3443 			 * old page will be flushed before it can be reused.
3444 			 */
3445 			folio_remove_rmap_pte(old_folio, vmf->page, vma);
3446 		}
3447 
3448 		/* Free the old page.. */
3449 		new_folio = old_folio;
3450 		page_copied = 1;
3451 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3452 	} else if (vmf->pte) {
3453 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3454 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3455 	}
3456 
3457 	mmu_notifier_invalidate_range_end(&range);
3458 
3459 	if (new_folio)
3460 		folio_put(new_folio);
3461 	if (old_folio) {
3462 		if (page_copied)
3463 			free_swap_cache(old_folio);
3464 		folio_put(old_folio);
3465 	}
3466 
3467 	delayacct_wpcopy_end();
3468 	return 0;
3469 oom:
3470 	ret = VM_FAULT_OOM;
3471 out:
3472 	if (old_folio)
3473 		folio_put(old_folio);
3474 
3475 	delayacct_wpcopy_end();
3476 	return ret;
3477 }
3478 
3479 /**
3480  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3481  *			  writeable once the page is prepared
3482  *
3483  * @vmf: structure describing the fault
3484  * @folio: the folio of vmf->page
3485  *
3486  * This function handles all that is needed to finish a write page fault in a
3487  * shared mapping due to PTE being read-only once the mapped page is prepared.
3488  * It handles locking of PTE and modifying it.
3489  *
3490  * The function expects the page to be locked or other protection against
3491  * concurrent faults / writeback (such as DAX radix tree locks).
3492  *
3493  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3494  * we acquired PTE lock.
3495  */
3496 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
3497 {
3498 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3499 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3500 				       &vmf->ptl);
3501 	if (!vmf->pte)
3502 		return VM_FAULT_NOPAGE;
3503 	/*
3504 	 * We might have raced with another page fault while we released the
3505 	 * pte_offset_map_lock.
3506 	 */
3507 	if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3508 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3509 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3510 		return VM_FAULT_NOPAGE;
3511 	}
3512 	wp_page_reuse(vmf, folio);
3513 	return 0;
3514 }
3515 
3516 /*
3517  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3518  * mapping
3519  */
3520 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3521 {
3522 	struct vm_area_struct *vma = vmf->vma;
3523 
3524 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3525 		vm_fault_t ret;
3526 
3527 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3528 		ret = vmf_can_call_fault(vmf);
3529 		if (ret)
3530 			return ret;
3531 
3532 		vmf->flags |= FAULT_FLAG_MKWRITE;
3533 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3534 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3535 			return ret;
3536 		return finish_mkwrite_fault(vmf, NULL);
3537 	}
3538 	wp_page_reuse(vmf, NULL);
3539 	return 0;
3540 }
3541 
3542 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3543 	__releases(vmf->ptl)
3544 {
3545 	struct vm_area_struct *vma = vmf->vma;
3546 	vm_fault_t ret = 0;
3547 
3548 	folio_get(folio);
3549 
3550 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3551 		vm_fault_t tmp;
3552 
3553 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3554 		tmp = vmf_can_call_fault(vmf);
3555 		if (tmp) {
3556 			folio_put(folio);
3557 			return tmp;
3558 		}
3559 
3560 		tmp = do_page_mkwrite(vmf, folio);
3561 		if (unlikely(!tmp || (tmp &
3562 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3563 			folio_put(folio);
3564 			return tmp;
3565 		}
3566 		tmp = finish_mkwrite_fault(vmf, folio);
3567 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3568 			folio_unlock(folio);
3569 			folio_put(folio);
3570 			return tmp;
3571 		}
3572 	} else {
3573 		wp_page_reuse(vmf, folio);
3574 		folio_lock(folio);
3575 	}
3576 	ret |= fault_dirty_shared_page(vmf);
3577 	folio_put(folio);
3578 
3579 	return ret;
3580 }
3581 
3582 static bool wp_can_reuse_anon_folio(struct folio *folio,
3583 				    struct vm_area_struct *vma)
3584 {
3585 	/*
3586 	 * We could currently only reuse a subpage of a large folio if no
3587 	 * other subpages of the large folios are still mapped. However,
3588 	 * let's just consistently not reuse subpages even if we could
3589 	 * reuse in that scenario, and give back a large folio a bit
3590 	 * sooner.
3591 	 */
3592 	if (folio_test_large(folio))
3593 		return false;
3594 
3595 	/*
3596 	 * We have to verify under folio lock: these early checks are
3597 	 * just an optimization to avoid locking the folio and freeing
3598 	 * the swapcache if there is little hope that we can reuse.
3599 	 *
3600 	 * KSM doesn't necessarily raise the folio refcount.
3601 	 */
3602 	if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3603 		return false;
3604 	if (!folio_test_lru(folio))
3605 		/*
3606 		 * We cannot easily detect+handle references from
3607 		 * remote LRU caches or references to LRU folios.
3608 		 */
3609 		lru_add_drain();
3610 	if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3611 		return false;
3612 	if (!folio_trylock(folio))
3613 		return false;
3614 	if (folio_test_swapcache(folio))
3615 		folio_free_swap(folio);
3616 	if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3617 		folio_unlock(folio);
3618 		return false;
3619 	}
3620 	/*
3621 	 * Ok, we've got the only folio reference from our mapping
3622 	 * and the folio is locked, it's dark out, and we're wearing
3623 	 * sunglasses. Hit it.
3624 	 */
3625 	folio_move_anon_rmap(folio, vma);
3626 	folio_unlock(folio);
3627 	return true;
3628 }
3629 
3630 /*
3631  * This routine handles present pages, when
3632  * * users try to write to a shared page (FAULT_FLAG_WRITE)
3633  * * GUP wants to take a R/O pin on a possibly shared anonymous page
3634  *   (FAULT_FLAG_UNSHARE)
3635  *
3636  * It is done by copying the page to a new address and decrementing the
3637  * shared-page counter for the old page.
3638  *
3639  * Note that this routine assumes that the protection checks have been
3640  * done by the caller (the low-level page fault routine in most cases).
3641  * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3642  * done any necessary COW.
3643  *
3644  * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3645  * though the page will change only once the write actually happens. This
3646  * avoids a few races, and potentially makes it more efficient.
3647  *
3648  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3649  * but allow concurrent faults), with pte both mapped and locked.
3650  * We return with mmap_lock still held, but pte unmapped and unlocked.
3651  */
3652 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3653 	__releases(vmf->ptl)
3654 {
3655 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3656 	struct vm_area_struct *vma = vmf->vma;
3657 	struct folio *folio = NULL;
3658 	pte_t pte;
3659 
3660 	if (likely(!unshare)) {
3661 		if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3662 			if (!userfaultfd_wp_async(vma)) {
3663 				pte_unmap_unlock(vmf->pte, vmf->ptl);
3664 				return handle_userfault(vmf, VM_UFFD_WP);
3665 			}
3666 
3667 			/*
3668 			 * Nothing needed (cache flush, TLB invalidations,
3669 			 * etc.) because we're only removing the uffd-wp bit,
3670 			 * which is completely invisible to the user.
3671 			 */
3672 			pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
3673 
3674 			set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3675 			/*
3676 			 * Update this to be prepared for following up CoW
3677 			 * handling
3678 			 */
3679 			vmf->orig_pte = pte;
3680 		}
3681 
3682 		/*
3683 		 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3684 		 * is flushed in this case before copying.
3685 		 */
3686 		if (unlikely(userfaultfd_wp(vmf->vma) &&
3687 			     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3688 			flush_tlb_page(vmf->vma, vmf->address);
3689 	}
3690 
3691 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3692 
3693 	if (vmf->page)
3694 		folio = page_folio(vmf->page);
3695 
3696 	/*
3697 	 * Shared mapping: we are guaranteed to have VM_WRITE and
3698 	 * FAULT_FLAG_WRITE set at this point.
3699 	 */
3700 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3701 		/*
3702 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3703 		 * VM_PFNMAP VMA.
3704 		 *
3705 		 * We should not cow pages in a shared writeable mapping.
3706 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3707 		 */
3708 		if (!vmf->page)
3709 			return wp_pfn_shared(vmf);
3710 		return wp_page_shared(vmf, folio);
3711 	}
3712 
3713 	/*
3714 	 * Private mapping: create an exclusive anonymous page copy if reuse
3715 	 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
3716 	 *
3717 	 * If we encounter a page that is marked exclusive, we must reuse
3718 	 * the page without further checks.
3719 	 */
3720 	if (folio && folio_test_anon(folio) &&
3721 	    (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
3722 		if (!PageAnonExclusive(vmf->page))
3723 			SetPageAnonExclusive(vmf->page);
3724 		if (unlikely(unshare)) {
3725 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3726 			return 0;
3727 		}
3728 		wp_page_reuse(vmf, folio);
3729 		return 0;
3730 	}
3731 	/*
3732 	 * Ok, we need to copy. Oh, well..
3733 	 */
3734 	if (folio)
3735 		folio_get(folio);
3736 
3737 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3738 #ifdef CONFIG_KSM
3739 	if (folio && folio_test_ksm(folio))
3740 		count_vm_event(COW_KSM);
3741 #endif
3742 	return wp_page_copy(vmf);
3743 }
3744 
3745 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3746 		unsigned long start_addr, unsigned long end_addr,
3747 		struct zap_details *details)
3748 {
3749 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3750 }
3751 
3752 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3753 					    pgoff_t first_index,
3754 					    pgoff_t last_index,
3755 					    struct zap_details *details)
3756 {
3757 	struct vm_area_struct *vma;
3758 	pgoff_t vba, vea, zba, zea;
3759 
3760 	vma_interval_tree_foreach(vma, root, first_index, last_index) {
3761 		vba = vma->vm_pgoff;
3762 		vea = vba + vma_pages(vma) - 1;
3763 		zba = max(first_index, vba);
3764 		zea = min(last_index, vea);
3765 
3766 		unmap_mapping_range_vma(vma,
3767 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3768 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3769 				details);
3770 	}
3771 }
3772 
3773 /**
3774  * unmap_mapping_folio() - Unmap single folio from processes.
3775  * @folio: The locked folio to be unmapped.
3776  *
3777  * Unmap this folio from any userspace process which still has it mmaped.
3778  * Typically, for efficiency, the range of nearby pages has already been
3779  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3780  * truncation or invalidation holds the lock on a folio, it may find that
3781  * the page has been remapped again: and then uses unmap_mapping_folio()
3782  * to unmap it finally.
3783  */
3784 void unmap_mapping_folio(struct folio *folio)
3785 {
3786 	struct address_space *mapping = folio->mapping;
3787 	struct zap_details details = { };
3788 	pgoff_t	first_index;
3789 	pgoff_t	last_index;
3790 
3791 	VM_BUG_ON(!folio_test_locked(folio));
3792 
3793 	first_index = folio->index;
3794 	last_index = folio_next_index(folio) - 1;
3795 
3796 	details.even_cows = false;
3797 	details.single_folio = folio;
3798 	details.zap_flags = ZAP_FLAG_DROP_MARKER;
3799 
3800 	i_mmap_lock_read(mapping);
3801 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3802 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3803 					 last_index, &details);
3804 	i_mmap_unlock_read(mapping);
3805 }
3806 
3807 /**
3808  * unmap_mapping_pages() - Unmap pages from processes.
3809  * @mapping: The address space containing pages to be unmapped.
3810  * @start: Index of first page to be unmapped.
3811  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3812  * @even_cows: Whether to unmap even private COWed pages.
3813  *
3814  * Unmap the pages in this address space from any userspace process which
3815  * has them mmaped.  Generally, you want to remove COWed pages as well when
3816  * a file is being truncated, but not when invalidating pages from the page
3817  * cache.
3818  */
3819 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3820 		pgoff_t nr, bool even_cows)
3821 {
3822 	struct zap_details details = { };
3823 	pgoff_t	first_index = start;
3824 	pgoff_t	last_index = start + nr - 1;
3825 
3826 	details.even_cows = even_cows;
3827 	if (last_index < first_index)
3828 		last_index = ULONG_MAX;
3829 
3830 	i_mmap_lock_read(mapping);
3831 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3832 		unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3833 					 last_index, &details);
3834 	i_mmap_unlock_read(mapping);
3835 }
3836 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
3837 
3838 /**
3839  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3840  * address_space corresponding to the specified byte range in the underlying
3841  * file.
3842  *
3843  * @mapping: the address space containing mmaps to be unmapped.
3844  * @holebegin: byte in first page to unmap, relative to the start of
3845  * the underlying file.  This will be rounded down to a PAGE_SIZE
3846  * boundary.  Note that this is different from truncate_pagecache(), which
3847  * must keep the partial page.  In contrast, we must get rid of
3848  * partial pages.
3849  * @holelen: size of prospective hole in bytes.  This will be rounded
3850  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3851  * end of the file.
3852  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3853  * but 0 when invalidating pagecache, don't throw away private data.
3854  */
3855 void unmap_mapping_range(struct address_space *mapping,
3856 		loff_t const holebegin, loff_t const holelen, int even_cows)
3857 {
3858 	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3859 	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
3860 
3861 	/* Check for overflow. */
3862 	if (sizeof(holelen) > sizeof(hlen)) {
3863 		long long holeend =
3864 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3865 		if (holeend & ~(long long)ULONG_MAX)
3866 			hlen = ULONG_MAX - hba + 1;
3867 	}
3868 
3869 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3870 }
3871 EXPORT_SYMBOL(unmap_mapping_range);
3872 
3873 /*
3874  * Restore a potential device exclusive pte to a working pte entry
3875  */
3876 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3877 {
3878 	struct folio *folio = page_folio(vmf->page);
3879 	struct vm_area_struct *vma = vmf->vma;
3880 	struct mmu_notifier_range range;
3881 	vm_fault_t ret;
3882 
3883 	/*
3884 	 * We need a reference to lock the folio because we don't hold
3885 	 * the PTL so a racing thread can remove the device-exclusive
3886 	 * entry and unmap it. If the folio is free the entry must
3887 	 * have been removed already. If it happens to have already
3888 	 * been re-allocated after being freed all we do is lock and
3889 	 * unlock it.
3890 	 */
3891 	if (!folio_try_get(folio))
3892 		return 0;
3893 
3894 	ret = folio_lock_or_retry(folio, vmf);
3895 	if (ret) {
3896 		folio_put(folio);
3897 		return ret;
3898 	}
3899 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3900 				vma->vm_mm, vmf->address & PAGE_MASK,
3901 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3902 	mmu_notifier_invalidate_range_start(&range);
3903 
3904 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3905 				&vmf->ptl);
3906 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3907 		restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
3908 
3909 	if (vmf->pte)
3910 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3911 	folio_unlock(folio);
3912 	folio_put(folio);
3913 
3914 	mmu_notifier_invalidate_range_end(&range);
3915 	return 0;
3916 }
3917 
3918 static inline bool should_try_to_free_swap(struct folio *folio,
3919 					   struct vm_area_struct *vma,
3920 					   unsigned int fault_flags)
3921 {
3922 	if (!folio_test_swapcache(folio))
3923 		return false;
3924 	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
3925 	    folio_test_mlocked(folio))
3926 		return true;
3927 	/*
3928 	 * If we want to map a page that's in the swapcache writable, we
3929 	 * have to detect via the refcount if we're really the exclusive
3930 	 * user. Try freeing the swapcache to get rid of the swapcache
3931 	 * reference only in case it's likely that we'll be the exlusive user.
3932 	 */
3933 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
3934 		folio_ref_count(folio) == (1 + folio_nr_pages(folio));
3935 }
3936 
3937 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3938 {
3939 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3940 				       vmf->address, &vmf->ptl);
3941 	if (!vmf->pte)
3942 		return 0;
3943 	/*
3944 	 * Be careful so that we will only recover a special uffd-wp pte into a
3945 	 * none pte.  Otherwise it means the pte could have changed, so retry.
3946 	 *
3947 	 * This should also cover the case where e.g. the pte changed
3948 	 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
3949 	 * So is_pte_marker() check is not enough to safely drop the pte.
3950 	 */
3951 	if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
3952 		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3953 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3954 	return 0;
3955 }
3956 
3957 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
3958 {
3959 	if (vma_is_anonymous(vmf->vma))
3960 		return do_anonymous_page(vmf);
3961 	else
3962 		return do_fault(vmf);
3963 }
3964 
3965 /*
3966  * This is actually a page-missing access, but with uffd-wp special pte
3967  * installed.  It means this pte was wr-protected before being unmapped.
3968  */
3969 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
3970 {
3971 	/*
3972 	 * Just in case there're leftover special ptes even after the region
3973 	 * got unregistered - we can simply clear them.
3974 	 */
3975 	if (unlikely(!userfaultfd_wp(vmf->vma)))
3976 		return pte_marker_clear(vmf);
3977 
3978 	return do_pte_missing(vmf);
3979 }
3980 
3981 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
3982 {
3983 	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
3984 	unsigned long marker = pte_marker_get(entry);
3985 
3986 	/*
3987 	 * PTE markers should never be empty.  If anything weird happened,
3988 	 * the best thing to do is to kill the process along with its mm.
3989 	 */
3990 	if (WARN_ON_ONCE(!marker))
3991 		return VM_FAULT_SIGBUS;
3992 
3993 	/* Higher priority than uffd-wp when data corrupted */
3994 	if (marker & PTE_MARKER_POISONED)
3995 		return VM_FAULT_HWPOISON;
3996 
3997 	if (pte_marker_entry_uffd_wp(entry))
3998 		return pte_marker_handle_uffd_wp(vmf);
3999 
4000 	/* This is an unknown pte marker */
4001 	return VM_FAULT_SIGBUS;
4002 }
4003 
4004 /*
4005  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4006  * but allow concurrent faults), and pte mapped but not yet locked.
4007  * We return with pte unmapped and unlocked.
4008  *
4009  * We return with the mmap_lock locked or unlocked in the same cases
4010  * as does filemap_fault().
4011  */
4012 vm_fault_t do_swap_page(struct vm_fault *vmf)
4013 {
4014 	struct vm_area_struct *vma = vmf->vma;
4015 	struct folio *swapcache, *folio = NULL;
4016 	struct page *page;
4017 	struct swap_info_struct *si = NULL;
4018 	rmap_t rmap_flags = RMAP_NONE;
4019 	bool need_clear_cache = false;
4020 	bool exclusive = false;
4021 	swp_entry_t entry;
4022 	pte_t pte;
4023 	vm_fault_t ret = 0;
4024 	void *shadow = NULL;
4025 	int nr_pages;
4026 	unsigned long page_idx;
4027 	unsigned long address;
4028 	pte_t *ptep;
4029 
4030 	if (!pte_unmap_same(vmf))
4031 		goto out;
4032 
4033 	entry = pte_to_swp_entry(vmf->orig_pte);
4034 	if (unlikely(non_swap_entry(entry))) {
4035 		if (is_migration_entry(entry)) {
4036 			migration_entry_wait(vma->vm_mm, vmf->pmd,
4037 					     vmf->address);
4038 		} else if (is_device_exclusive_entry(entry)) {
4039 			vmf->page = pfn_swap_entry_to_page(entry);
4040 			ret = remove_device_exclusive_entry(vmf);
4041 		} else if (is_device_private_entry(entry)) {
4042 			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4043 				/*
4044 				 * migrate_to_ram is not yet ready to operate
4045 				 * under VMA lock.
4046 				 */
4047 				vma_end_read(vma);
4048 				ret = VM_FAULT_RETRY;
4049 				goto out;
4050 			}
4051 
4052 			vmf->page = pfn_swap_entry_to_page(entry);
4053 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4054 					vmf->address, &vmf->ptl);
4055 			if (unlikely(!vmf->pte ||
4056 				     !pte_same(ptep_get(vmf->pte),
4057 							vmf->orig_pte)))
4058 				goto unlock;
4059 
4060 			/*
4061 			 * Get a page reference while we know the page can't be
4062 			 * freed.
4063 			 */
4064 			get_page(vmf->page);
4065 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4066 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
4067 			put_page(vmf->page);
4068 		} else if (is_hwpoison_entry(entry)) {
4069 			ret = VM_FAULT_HWPOISON;
4070 		} else if (is_pte_marker_entry(entry)) {
4071 			ret = handle_pte_marker(vmf);
4072 		} else {
4073 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4074 			ret = VM_FAULT_SIGBUS;
4075 		}
4076 		goto out;
4077 	}
4078 
4079 	/* Prevent swapoff from happening to us. */
4080 	si = get_swap_device(entry);
4081 	if (unlikely(!si))
4082 		goto out;
4083 
4084 	folio = swap_cache_get_folio(entry, vma, vmf->address);
4085 	if (folio)
4086 		page = folio_file_page(folio, swp_offset(entry));
4087 	swapcache = folio;
4088 
4089 	if (!folio) {
4090 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
4091 		    __swap_count(entry) == 1) {
4092 			/*
4093 			 * Prevent parallel swapin from proceeding with
4094 			 * the cache flag. Otherwise, another thread may
4095 			 * finish swapin first, free the entry, and swapout
4096 			 * reusing the same entry. It's undetectable as
4097 			 * pte_same() returns true due to entry reuse.
4098 			 */
4099 			if (swapcache_prepare(entry)) {
4100 				/* Relax a bit to prevent rapid repeated page faults */
4101 				schedule_timeout_uninterruptible(1);
4102 				goto out;
4103 			}
4104 			need_clear_cache = true;
4105 
4106 			/* skip swapcache */
4107 			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
4108 						vma, vmf->address, false);
4109 			page = &folio->page;
4110 			if (folio) {
4111 				__folio_set_locked(folio);
4112 				__folio_set_swapbacked(folio);
4113 
4114 				if (mem_cgroup_swapin_charge_folio(folio,
4115 							vma->vm_mm, GFP_KERNEL,
4116 							entry)) {
4117 					ret = VM_FAULT_OOM;
4118 					goto out_page;
4119 				}
4120 				mem_cgroup_swapin_uncharge_swap(entry);
4121 
4122 				shadow = get_shadow_from_swap_cache(entry);
4123 				if (shadow)
4124 					workingset_refault(folio, shadow);
4125 
4126 				folio_add_lru(folio);
4127 
4128 				/* To provide entry to swap_read_folio() */
4129 				folio->swap = entry;
4130 				swap_read_folio(folio, NULL);
4131 				folio->private = NULL;
4132 			}
4133 		} else {
4134 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
4135 						vmf);
4136 			if (page)
4137 				folio = page_folio(page);
4138 			swapcache = folio;
4139 		}
4140 
4141 		if (!folio) {
4142 			/*
4143 			 * Back out if somebody else faulted in this pte
4144 			 * while we released the pte lock.
4145 			 */
4146 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4147 					vmf->address, &vmf->ptl);
4148 			if (likely(vmf->pte &&
4149 				   pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4150 				ret = VM_FAULT_OOM;
4151 			goto unlock;
4152 		}
4153 
4154 		/* Had to read the page from swap area: Major fault */
4155 		ret = VM_FAULT_MAJOR;
4156 		count_vm_event(PGMAJFAULT);
4157 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4158 	} else if (PageHWPoison(page)) {
4159 		/*
4160 		 * hwpoisoned dirty swapcache pages are kept for killing
4161 		 * owner processes (which may be unknown at hwpoison time)
4162 		 */
4163 		ret = VM_FAULT_HWPOISON;
4164 		goto out_release;
4165 	}
4166 
4167 	ret |= folio_lock_or_retry(folio, vmf);
4168 	if (ret & VM_FAULT_RETRY)
4169 		goto out_release;
4170 
4171 	if (swapcache) {
4172 		/*
4173 		 * Make sure folio_free_swap() or swapoff did not release the
4174 		 * swapcache from under us.  The page pin, and pte_same test
4175 		 * below, are not enough to exclude that.  Even if it is still
4176 		 * swapcache, we need to check that the page's swap has not
4177 		 * changed.
4178 		 */
4179 		if (unlikely(!folio_test_swapcache(folio) ||
4180 			     page_swap_entry(page).val != entry.val))
4181 			goto out_page;
4182 
4183 		/*
4184 		 * KSM sometimes has to copy on read faults, for example, if
4185 		 * page->index of !PageKSM() pages would be nonlinear inside the
4186 		 * anon VMA -- PageKSM() is lost on actual swapout.
4187 		 */
4188 		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
4189 		if (unlikely(!folio)) {
4190 			ret = VM_FAULT_OOM;
4191 			folio = swapcache;
4192 			goto out_page;
4193 		} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
4194 			ret = VM_FAULT_HWPOISON;
4195 			folio = swapcache;
4196 			goto out_page;
4197 		}
4198 		if (folio != swapcache)
4199 			page = folio_page(folio, 0);
4200 
4201 		/*
4202 		 * If we want to map a page that's in the swapcache writable, we
4203 		 * have to detect via the refcount if we're really the exclusive
4204 		 * owner. Try removing the extra reference from the local LRU
4205 		 * caches if required.
4206 		 */
4207 		if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
4208 		    !folio_test_ksm(folio) && !folio_test_lru(folio))
4209 			lru_add_drain();
4210 	}
4211 
4212 	folio_throttle_swaprate(folio, GFP_KERNEL);
4213 
4214 	/*
4215 	 * Back out if somebody else already faulted in this pte.
4216 	 */
4217 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4218 			&vmf->ptl);
4219 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4220 		goto out_nomap;
4221 
4222 	if (unlikely(!folio_test_uptodate(folio))) {
4223 		ret = VM_FAULT_SIGBUS;
4224 		goto out_nomap;
4225 	}
4226 
4227 	nr_pages = 1;
4228 	page_idx = 0;
4229 	address = vmf->address;
4230 	ptep = vmf->pte;
4231 	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
4232 		int nr = folio_nr_pages(folio);
4233 		unsigned long idx = folio_page_idx(folio, page);
4234 		unsigned long folio_start = address - idx * PAGE_SIZE;
4235 		unsigned long folio_end = folio_start + nr * PAGE_SIZE;
4236 		pte_t *folio_ptep;
4237 		pte_t folio_pte;
4238 
4239 		if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
4240 			goto check_folio;
4241 		if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
4242 			goto check_folio;
4243 
4244 		folio_ptep = vmf->pte - idx;
4245 		folio_pte = ptep_get(folio_ptep);
4246 		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4247 		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4248 			goto check_folio;
4249 
4250 		page_idx = idx;
4251 		address = folio_start;
4252 		ptep = folio_ptep;
4253 		nr_pages = nr;
4254 		entry = folio->swap;
4255 		page = &folio->page;
4256 	}
4257 
4258 check_folio:
4259 	/*
4260 	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4261 	 * must never point at an anonymous page in the swapcache that is
4262 	 * PG_anon_exclusive. Sanity check that this holds and especially, that
4263 	 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4264 	 * check after taking the PT lock and making sure that nobody
4265 	 * concurrently faulted in this page and set PG_anon_exclusive.
4266 	 */
4267 	BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
4268 	BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
4269 
4270 	/*
4271 	 * Check under PT lock (to protect against concurrent fork() sharing
4272 	 * the swap entry concurrently) for certainly exclusive pages.
4273 	 */
4274 	if (!folio_test_ksm(folio)) {
4275 		exclusive = pte_swp_exclusive(vmf->orig_pte);
4276 		if (folio != swapcache) {
4277 			/*
4278 			 * We have a fresh page that is not exposed to the
4279 			 * swapcache -> certainly exclusive.
4280 			 */
4281 			exclusive = true;
4282 		} else if (exclusive && folio_test_writeback(folio) &&
4283 			  data_race(si->flags & SWP_STABLE_WRITES)) {
4284 			/*
4285 			 * This is tricky: not all swap backends support
4286 			 * concurrent page modifications while under writeback.
4287 			 *
4288 			 * So if we stumble over such a page in the swapcache
4289 			 * we must not set the page exclusive, otherwise we can
4290 			 * map it writable without further checks and modify it
4291 			 * while still under writeback.
4292 			 *
4293 			 * For these problematic swap backends, simply drop the
4294 			 * exclusive marker: this is perfectly fine as we start
4295 			 * writeback only if we fully unmapped the page and
4296 			 * there are no unexpected references on the page after
4297 			 * unmapping succeeded. After fully unmapped, no
4298 			 * further GUP references (FOLL_GET and FOLL_PIN) can
4299 			 * appear, so dropping the exclusive marker and mapping
4300 			 * it only R/O is fine.
4301 			 */
4302 			exclusive = false;
4303 		}
4304 	}
4305 
4306 	/*
4307 	 * Some architectures may have to restore extra metadata to the page
4308 	 * when reading from swap. This metadata may be indexed by swap entry
4309 	 * so this must be called before swap_free().
4310 	 */
4311 	arch_swap_restore(folio_swap(entry, folio), folio);
4312 
4313 	/*
4314 	 * Remove the swap entry and conditionally try to free up the swapcache.
4315 	 * We're already holding a reference on the page but haven't mapped it
4316 	 * yet.
4317 	 */
4318 	swap_free_nr(entry, nr_pages);
4319 	if (should_try_to_free_swap(folio, vma, vmf->flags))
4320 		folio_free_swap(folio);
4321 
4322 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4323 	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
4324 	pte = mk_pte(page, vma->vm_page_prot);
4325 	if (pte_swp_soft_dirty(vmf->orig_pte))
4326 		pte = pte_mksoft_dirty(pte);
4327 	if (pte_swp_uffd_wp(vmf->orig_pte))
4328 		pte = pte_mkuffd_wp(pte);
4329 
4330 	/*
4331 	 * Same logic as in do_wp_page(); however, optimize for pages that are
4332 	 * certainly not shared either because we just allocated them without
4333 	 * exposing them to the swapcache or because the swap entry indicates
4334 	 * exclusivity.
4335 	 */
4336 	if (!folio_test_ksm(folio) &&
4337 	    (exclusive || folio_ref_count(folio) == 1)) {
4338 		if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
4339 		    !pte_needs_soft_dirty_wp(vma, pte)) {
4340 			pte = pte_mkwrite(pte, vma);
4341 			if (vmf->flags & FAULT_FLAG_WRITE) {
4342 				pte = pte_mkdirty(pte);
4343 				vmf->flags &= ~FAULT_FLAG_WRITE;
4344 			}
4345 		}
4346 		rmap_flags |= RMAP_EXCLUSIVE;
4347 	}
4348 	folio_ref_add(folio, nr_pages - 1);
4349 	flush_icache_pages(vma, page, nr_pages);
4350 	vmf->orig_pte = pte_advance_pfn(pte, page_idx);
4351 
4352 	/* ksm created a completely new copy */
4353 	if (unlikely(folio != swapcache && swapcache)) {
4354 		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
4355 		folio_add_lru_vma(folio, vma);
4356 	} else if (!folio_test_anon(folio)) {
4357 		/*
4358 		 * We currently only expect small !anon folios, which are either
4359 		 * fully exclusive or fully shared. If we ever get large folios
4360 		 * here, we have to be careful.
4361 		 */
4362 		VM_WARN_ON_ONCE(folio_test_large(folio));
4363 		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
4364 		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
4365 	} else {
4366 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
4367 					rmap_flags);
4368 	}
4369 
4370 	VM_BUG_ON(!folio_test_anon(folio) ||
4371 			(pte_write(pte) && !PageAnonExclusive(page)));
4372 	set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
4373 	arch_do_swap_page_nr(vma->vm_mm, vma, address,
4374 			pte, pte, nr_pages);
4375 
4376 	folio_unlock(folio);
4377 	if (folio != swapcache && swapcache) {
4378 		/*
4379 		 * Hold the lock to avoid the swap entry to be reused
4380 		 * until we take the PT lock for the pte_same() check
4381 		 * (to avoid false positives from pte_same). For
4382 		 * further safety release the lock after the swap_free
4383 		 * so that the swap count won't change under a
4384 		 * parallel locked swapcache.
4385 		 */
4386 		folio_unlock(swapcache);
4387 		folio_put(swapcache);
4388 	}
4389 
4390 	if (vmf->flags & FAULT_FLAG_WRITE) {
4391 		ret |= do_wp_page(vmf);
4392 		if (ret & VM_FAULT_ERROR)
4393 			ret &= VM_FAULT_ERROR;
4394 		goto out;
4395 	}
4396 
4397 	/* No need to invalidate - it was non-present before */
4398 	update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
4399 unlock:
4400 	if (vmf->pte)
4401 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4402 out:
4403 	/* Clear the swap cache pin for direct swapin after PTL unlock */
4404 	if (need_clear_cache)
4405 		swapcache_clear(si, entry);
4406 	if (si)
4407 		put_swap_device(si);
4408 	return ret;
4409 out_nomap:
4410 	if (vmf->pte)
4411 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4412 out_page:
4413 	folio_unlock(folio);
4414 out_release:
4415 	folio_put(folio);
4416 	if (folio != swapcache && swapcache) {
4417 		folio_unlock(swapcache);
4418 		folio_put(swapcache);
4419 	}
4420 	if (need_clear_cache)
4421 		swapcache_clear(si, entry);
4422 	if (si)
4423 		put_swap_device(si);
4424 	return ret;
4425 }
4426 
4427 static bool pte_range_none(pte_t *pte, int nr_pages)
4428 {
4429 	int i;
4430 
4431 	for (i = 0; i < nr_pages; i++) {
4432 		if (!pte_none(ptep_get_lockless(pte + i)))
4433 			return false;
4434 	}
4435 
4436 	return true;
4437 }
4438 
4439 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
4440 {
4441 	struct vm_area_struct *vma = vmf->vma;
4442 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4443 	unsigned long orders;
4444 	struct folio *folio;
4445 	unsigned long addr;
4446 	pte_t *pte;
4447 	gfp_t gfp;
4448 	int order;
4449 
4450 	/*
4451 	 * If uffd is active for the vma we need per-page fault fidelity to
4452 	 * maintain the uffd semantics.
4453 	 */
4454 	if (unlikely(userfaultfd_armed(vma)))
4455 		goto fallback;
4456 
4457 	/*
4458 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4459 	 * for this vma. Then filter out the orders that can't be allocated over
4460 	 * the faulting address and still be fully contained in the vma.
4461 	 */
4462 	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4463 			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4464 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4465 
4466 	if (!orders)
4467 		goto fallback;
4468 
4469 	pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
4470 	if (!pte)
4471 		return ERR_PTR(-EAGAIN);
4472 
4473 	/*
4474 	 * Find the highest order where the aligned range is completely
4475 	 * pte_none(). Note that all remaining orders will be completely
4476 	 * pte_none().
4477 	 */
4478 	order = highest_order(orders);
4479 	while (orders) {
4480 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4481 		if (pte_range_none(pte + pte_index(addr), 1 << order))
4482 			break;
4483 		order = next_order(&orders, order);
4484 	}
4485 
4486 	pte_unmap(pte);
4487 
4488 	if (!orders)
4489 		goto fallback;
4490 
4491 	/* Try allocating the highest of the remaining orders. */
4492 	gfp = vma_thp_gfp_mask(vma);
4493 	while (orders) {
4494 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4495 		folio = vma_alloc_folio(gfp, order, vma, addr, true);
4496 		if (folio) {
4497 			if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
4498 				count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
4499 				folio_put(folio);
4500 				goto next;
4501 			}
4502 			folio_throttle_swaprate(folio, gfp);
4503 			folio_zero_user(folio, vmf->address);
4504 			return folio;
4505 		}
4506 next:
4507 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
4508 		order = next_order(&orders, order);
4509 	}
4510 
4511 fallback:
4512 #endif
4513 	return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
4514 }
4515 
4516 /*
4517  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4518  * but allow concurrent faults), and pte mapped but not yet locked.
4519  * We return with mmap_lock still held, but pte unmapped and unlocked.
4520  */
4521 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4522 {
4523 	struct vm_area_struct *vma = vmf->vma;
4524 	unsigned long addr = vmf->address;
4525 	struct folio *folio;
4526 	vm_fault_t ret = 0;
4527 	int nr_pages = 1;
4528 	pte_t entry;
4529 
4530 	/* File mapping without ->vm_ops ? */
4531 	if (vma->vm_flags & VM_SHARED)
4532 		return VM_FAULT_SIGBUS;
4533 
4534 	/*
4535 	 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4536 	 * be distinguished from a transient failure of pte_offset_map().
4537 	 */
4538 	if (pte_alloc(vma->vm_mm, vmf->pmd))
4539 		return VM_FAULT_OOM;
4540 
4541 	/* Use the zero-page for reads */
4542 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4543 			!mm_forbids_zeropage(vma->vm_mm)) {
4544 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4545 						vma->vm_page_prot));
4546 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4547 				vmf->address, &vmf->ptl);
4548 		if (!vmf->pte)
4549 			goto unlock;
4550 		if (vmf_pte_changed(vmf)) {
4551 			update_mmu_tlb(vma, vmf->address, vmf->pte);
4552 			goto unlock;
4553 		}
4554 		ret = check_stable_address_space(vma->vm_mm);
4555 		if (ret)
4556 			goto unlock;
4557 		/* Deliver the page fault to userland, check inside PT lock */
4558 		if (userfaultfd_missing(vma)) {
4559 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4560 			return handle_userfault(vmf, VM_UFFD_MISSING);
4561 		}
4562 		goto setpte;
4563 	}
4564 
4565 	/* Allocate our own private page. */
4566 	ret = vmf_anon_prepare(vmf);
4567 	if (ret)
4568 		return ret;
4569 	/* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
4570 	folio = alloc_anon_folio(vmf);
4571 	if (IS_ERR(folio))
4572 		return 0;
4573 	if (!folio)
4574 		goto oom;
4575 
4576 	nr_pages = folio_nr_pages(folio);
4577 	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4578 
4579 	/*
4580 	 * The memory barrier inside __folio_mark_uptodate makes sure that
4581 	 * preceding stores to the page contents become visible before
4582 	 * the set_pte_at() write.
4583 	 */
4584 	__folio_mark_uptodate(folio);
4585 
4586 	entry = mk_pte(&folio->page, vma->vm_page_prot);
4587 	entry = pte_sw_mkyoung(entry);
4588 	if (vma->vm_flags & VM_WRITE)
4589 		entry = pte_mkwrite(pte_mkdirty(entry), vma);
4590 
4591 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
4592 	if (!vmf->pte)
4593 		goto release;
4594 	if (nr_pages == 1 && vmf_pte_changed(vmf)) {
4595 		update_mmu_tlb(vma, addr, vmf->pte);
4596 		goto release;
4597 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
4598 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
4599 		goto release;
4600 	}
4601 
4602 	ret = check_stable_address_space(vma->vm_mm);
4603 	if (ret)
4604 		goto release;
4605 
4606 	/* Deliver the page fault to userland, check inside PT lock */
4607 	if (userfaultfd_missing(vma)) {
4608 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4609 		folio_put(folio);
4610 		return handle_userfault(vmf, VM_UFFD_MISSING);
4611 	}
4612 
4613 	folio_ref_add(folio, nr_pages - 1);
4614 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4615 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4616 	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
4617 #endif
4618 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
4619 	folio_add_lru_vma(folio, vma);
4620 setpte:
4621 	if (vmf_orig_pte_uffd_wp(vmf))
4622 		entry = pte_mkuffd_wp(entry);
4623 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
4624 
4625 	/* No need to invalidate - it was non-present before */
4626 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
4627 unlock:
4628 	if (vmf->pte)
4629 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4630 	return ret;
4631 release:
4632 	folio_put(folio);
4633 	goto unlock;
4634 oom:
4635 	return VM_FAULT_OOM;
4636 }
4637 
4638 /*
4639  * The mmap_lock must have been held on entry, and may have been
4640  * released depending on flags and vma->vm_ops->fault() return value.
4641  * See filemap_fault() and __lock_page_retry().
4642  */
4643 static vm_fault_t __do_fault(struct vm_fault *vmf)
4644 {
4645 	struct vm_area_struct *vma = vmf->vma;
4646 	struct folio *folio;
4647 	vm_fault_t ret;
4648 
4649 	/*
4650 	 * Preallocate pte before we take page_lock because this might lead to
4651 	 * deadlocks for memcg reclaim which waits for pages under writeback:
4652 	 *				lock_page(A)
4653 	 *				SetPageWriteback(A)
4654 	 *				unlock_page(A)
4655 	 * lock_page(B)
4656 	 *				lock_page(B)
4657 	 * pte_alloc_one
4658 	 *   shrink_folio_list
4659 	 *     wait_on_page_writeback(A)
4660 	 *				SetPageWriteback(B)
4661 	 *				unlock_page(B)
4662 	 *				# flush A, B to clear the writeback
4663 	 */
4664 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4665 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4666 		if (!vmf->prealloc_pte)
4667 			return VM_FAULT_OOM;
4668 	}
4669 
4670 	ret = vma->vm_ops->fault(vmf);
4671 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4672 			    VM_FAULT_DONE_COW)))
4673 		return ret;
4674 
4675 	folio = page_folio(vmf->page);
4676 	if (unlikely(PageHWPoison(vmf->page))) {
4677 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4678 		if (ret & VM_FAULT_LOCKED) {
4679 			if (page_mapped(vmf->page))
4680 				unmap_mapping_folio(folio);
4681 			/* Retry if a clean folio was removed from the cache. */
4682 			if (mapping_evict_folio(folio->mapping, folio))
4683 				poisonret = VM_FAULT_NOPAGE;
4684 			folio_unlock(folio);
4685 		}
4686 		folio_put(folio);
4687 		vmf->page = NULL;
4688 		return poisonret;
4689 	}
4690 
4691 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
4692 		folio_lock(folio);
4693 	else
4694 		VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
4695 
4696 	return ret;
4697 }
4698 
4699 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4700 static void deposit_prealloc_pte(struct vm_fault *vmf)
4701 {
4702 	struct vm_area_struct *vma = vmf->vma;
4703 
4704 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4705 	/*
4706 	 * We are going to consume the prealloc table,
4707 	 * count that as nr_ptes.
4708 	 */
4709 	mm_inc_nr_ptes(vma->vm_mm);
4710 	vmf->prealloc_pte = NULL;
4711 }
4712 
4713 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4714 {
4715 	struct folio *folio = page_folio(page);
4716 	struct vm_area_struct *vma = vmf->vma;
4717 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4718 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4719 	pmd_t entry;
4720 	vm_fault_t ret = VM_FAULT_FALLBACK;
4721 
4722 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
4723 		return ret;
4724 
4725 	if (folio_order(folio) != HPAGE_PMD_ORDER)
4726 		return ret;
4727 	page = &folio->page;
4728 
4729 	/*
4730 	 * Just backoff if any subpage of a THP is corrupted otherwise
4731 	 * the corrupted page may mapped by PMD silently to escape the
4732 	 * check.  This kind of THP just can be PTE mapped.  Access to
4733 	 * the corrupted subpage should trigger SIGBUS as expected.
4734 	 */
4735 	if (unlikely(folio_test_has_hwpoisoned(folio)))
4736 		return ret;
4737 
4738 	/*
4739 	 * Archs like ppc64 need additional space to store information
4740 	 * related to pte entry. Use the preallocated table for that.
4741 	 */
4742 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4743 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4744 		if (!vmf->prealloc_pte)
4745 			return VM_FAULT_OOM;
4746 	}
4747 
4748 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4749 	if (unlikely(!pmd_none(*vmf->pmd)))
4750 		goto out;
4751 
4752 	flush_icache_pages(vma, page, HPAGE_PMD_NR);
4753 
4754 	entry = mk_huge_pmd(page, vma->vm_page_prot);
4755 	if (write)
4756 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4757 
4758 	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
4759 	folio_add_file_rmap_pmd(folio, page, vma);
4760 
4761 	/*
4762 	 * deposit and withdraw with pmd lock held
4763 	 */
4764 	if (arch_needs_pgtable_deposit())
4765 		deposit_prealloc_pte(vmf);
4766 
4767 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4768 
4769 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4770 
4771 	/* fault is handled */
4772 	ret = 0;
4773 	count_vm_event(THP_FILE_MAPPED);
4774 out:
4775 	spin_unlock(vmf->ptl);
4776 	return ret;
4777 }
4778 #else
4779 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4780 {
4781 	return VM_FAULT_FALLBACK;
4782 }
4783 #endif
4784 
4785 /**
4786  * set_pte_range - Set a range of PTEs to point to pages in a folio.
4787  * @vmf: Fault decription.
4788  * @folio: The folio that contains @page.
4789  * @page: The first page to create a PTE for.
4790  * @nr: The number of PTEs to create.
4791  * @addr: The first address to create a PTE for.
4792  */
4793 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
4794 		struct page *page, unsigned int nr, unsigned long addr)
4795 {
4796 	struct vm_area_struct *vma = vmf->vma;
4797 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4798 	bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
4799 	pte_t entry;
4800 
4801 	flush_icache_pages(vma, page, nr);
4802 	entry = mk_pte(page, vma->vm_page_prot);
4803 
4804 	if (prefault && arch_wants_old_prefaulted_pte())
4805 		entry = pte_mkold(entry);
4806 	else
4807 		entry = pte_sw_mkyoung(entry);
4808 
4809 	if (write)
4810 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4811 	if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
4812 		entry = pte_mkuffd_wp(entry);
4813 	/* copy-on-write page */
4814 	if (write && !(vma->vm_flags & VM_SHARED)) {
4815 		VM_BUG_ON_FOLIO(nr != 1, folio);
4816 		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
4817 		folio_add_lru_vma(folio, vma);
4818 	} else {
4819 		folio_add_file_rmap_ptes(folio, page, nr, vma);
4820 	}
4821 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
4822 
4823 	/* no need to invalidate: a not-present page won't be cached */
4824 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
4825 }
4826 
4827 static bool vmf_pte_changed(struct vm_fault *vmf)
4828 {
4829 	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
4830 		return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
4831 
4832 	return !pte_none(ptep_get(vmf->pte));
4833 }
4834 
4835 /**
4836  * finish_fault - finish page fault once we have prepared the page to fault
4837  *
4838  * @vmf: structure describing the fault
4839  *
4840  * This function handles all that is needed to finish a page fault once the
4841  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4842  * given page, adds reverse page mapping, handles memcg charges and LRU
4843  * addition.
4844  *
4845  * The function expects the page to be locked and on success it consumes a
4846  * reference of a page being mapped (for the PTE which maps it).
4847  *
4848  * Return: %0 on success, %VM_FAULT_ code in case of error.
4849  */
4850 vm_fault_t finish_fault(struct vm_fault *vmf)
4851 {
4852 	struct vm_area_struct *vma = vmf->vma;
4853 	struct page *page;
4854 	struct folio *folio;
4855 	vm_fault_t ret;
4856 	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
4857 		      !(vma->vm_flags & VM_SHARED);
4858 	int type, nr_pages;
4859 	unsigned long addr = vmf->address;
4860 
4861 	/* Did we COW the page? */
4862 	if (is_cow)
4863 		page = vmf->cow_page;
4864 	else
4865 		page = vmf->page;
4866 
4867 	/*
4868 	 * check even for read faults because we might have lost our CoWed
4869 	 * page
4870 	 */
4871 	if (!(vma->vm_flags & VM_SHARED)) {
4872 		ret = check_stable_address_space(vma->vm_mm);
4873 		if (ret)
4874 			return ret;
4875 	}
4876 
4877 	if (pmd_none(*vmf->pmd)) {
4878 		if (PageTransCompound(page)) {
4879 			ret = do_set_pmd(vmf, page);
4880 			if (ret != VM_FAULT_FALLBACK)
4881 				return ret;
4882 		}
4883 
4884 		if (vmf->prealloc_pte)
4885 			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4886 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
4887 			return VM_FAULT_OOM;
4888 	}
4889 
4890 	folio = page_folio(page);
4891 	nr_pages = folio_nr_pages(folio);
4892 
4893 	/*
4894 	 * Using per-page fault to maintain the uffd semantics, and same
4895 	 * approach also applies to non-anonymous-shmem faults to avoid
4896 	 * inflating the RSS of the process.
4897 	 */
4898 	if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
4899 		nr_pages = 1;
4900 	} else if (nr_pages > 1) {
4901 		pgoff_t idx = folio_page_idx(folio, page);
4902 		/* The page offset of vmf->address within the VMA. */
4903 		pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
4904 		/* The index of the entry in the pagetable for fault page. */
4905 		pgoff_t pte_off = pte_index(vmf->address);
4906 
4907 		/*
4908 		 * Fallback to per-page fault in case the folio size in page
4909 		 * cache beyond the VMA limits and PMD pagetable limits.
4910 		 */
4911 		if (unlikely(vma_off < idx ||
4912 			    vma_off + (nr_pages - idx) > vma_pages(vma) ||
4913 			    pte_off < idx ||
4914 			    pte_off + (nr_pages - idx)  > PTRS_PER_PTE)) {
4915 			nr_pages = 1;
4916 		} else {
4917 			/* Now we can set mappings for the whole large folio. */
4918 			addr = vmf->address - idx * PAGE_SIZE;
4919 			page = &folio->page;
4920 		}
4921 	}
4922 
4923 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4924 				       addr, &vmf->ptl);
4925 	if (!vmf->pte)
4926 		return VM_FAULT_NOPAGE;
4927 
4928 	/* Re-check under ptl */
4929 	if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
4930 		update_mmu_tlb(vma, addr, vmf->pte);
4931 		ret = VM_FAULT_NOPAGE;
4932 		goto unlock;
4933 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
4934 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
4935 		ret = VM_FAULT_NOPAGE;
4936 		goto unlock;
4937 	}
4938 
4939 	folio_ref_add(folio, nr_pages - 1);
4940 	set_pte_range(vmf, folio, page, nr_pages, addr);
4941 	type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
4942 	add_mm_counter(vma->vm_mm, type, nr_pages);
4943 	ret = 0;
4944 
4945 unlock:
4946 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4947 	return ret;
4948 }
4949 
4950 static unsigned long fault_around_pages __read_mostly =
4951 	65536 >> PAGE_SHIFT;
4952 
4953 #ifdef CONFIG_DEBUG_FS
4954 static int fault_around_bytes_get(void *data, u64 *val)
4955 {
4956 	*val = fault_around_pages << PAGE_SHIFT;
4957 	return 0;
4958 }
4959 
4960 /*
4961  * fault_around_bytes must be rounded down to the nearest page order as it's
4962  * what do_fault_around() expects to see.
4963  */
4964 static int fault_around_bytes_set(void *data, u64 val)
4965 {
4966 	if (val / PAGE_SIZE > PTRS_PER_PTE)
4967 		return -EINVAL;
4968 
4969 	/*
4970 	 * The minimum value is 1 page, however this results in no fault-around
4971 	 * at all. See should_fault_around().
4972 	 */
4973 	val = max(val, PAGE_SIZE);
4974 	fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
4975 
4976 	return 0;
4977 }
4978 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4979 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
4980 
4981 static int __init fault_around_debugfs(void)
4982 {
4983 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4984 				   &fault_around_bytes_fops);
4985 	return 0;
4986 }
4987 late_initcall(fault_around_debugfs);
4988 #endif
4989 
4990 /*
4991  * do_fault_around() tries to map few pages around the fault address. The hope
4992  * is that the pages will be needed soon and this will lower the number of
4993  * faults to handle.
4994  *
4995  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4996  * not ready to be mapped: not up-to-date, locked, etc.
4997  *
4998  * This function doesn't cross VMA or page table boundaries, in order to call
4999  * map_pages() and acquire a PTE lock only once.
5000  *
5001  * fault_around_pages defines how many pages we'll try to map.
5002  * do_fault_around() expects it to be set to a power of two less than or equal
5003  * to PTRS_PER_PTE.
5004  *
5005  * The virtual address of the area that we map is naturally aligned to
5006  * fault_around_pages * PAGE_SIZE rounded down to the machine page size
5007  * (and therefore to page order).  This way it's easier to guarantee
5008  * that we don't cross page table boundaries.
5009  */
5010 static vm_fault_t do_fault_around(struct vm_fault *vmf)
5011 {
5012 	pgoff_t nr_pages = READ_ONCE(fault_around_pages);
5013 	pgoff_t pte_off = pte_index(vmf->address);
5014 	/* The page offset of vmf->address within the VMA. */
5015 	pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5016 	pgoff_t from_pte, to_pte;
5017 	vm_fault_t ret;
5018 
5019 	/* The PTE offset of the start address, clamped to the VMA. */
5020 	from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
5021 		       pte_off - min(pte_off, vma_off));
5022 
5023 	/* The PTE offset of the end address, clamped to the VMA and PTE. */
5024 	to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
5025 		      pte_off + vma_pages(vmf->vma) - vma_off) - 1;
5026 
5027 	if (pmd_none(*vmf->pmd)) {
5028 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
5029 		if (!vmf->prealloc_pte)
5030 			return VM_FAULT_OOM;
5031 	}
5032 
5033 	rcu_read_lock();
5034 	ret = vmf->vma->vm_ops->map_pages(vmf,
5035 			vmf->pgoff + from_pte - pte_off,
5036 			vmf->pgoff + to_pte - pte_off);
5037 	rcu_read_unlock();
5038 
5039 	return ret;
5040 }
5041 
5042 /* Return true if we should do read fault-around, false otherwise */
5043 static inline bool should_fault_around(struct vm_fault *vmf)
5044 {
5045 	/* No ->map_pages?  No way to fault around... */
5046 	if (!vmf->vma->vm_ops->map_pages)
5047 		return false;
5048 
5049 	if (uffd_disable_fault_around(vmf->vma))
5050 		return false;
5051 
5052 	/* A single page implies no faulting 'around' at all. */
5053 	return fault_around_pages > 1;
5054 }
5055 
5056 static vm_fault_t do_read_fault(struct vm_fault *vmf)
5057 {
5058 	vm_fault_t ret = 0;
5059 	struct folio *folio;
5060 
5061 	/*
5062 	 * Let's call ->map_pages() first and use ->fault() as fallback
5063 	 * if page by the offset is not ready to be mapped (cold cache or
5064 	 * something).
5065 	 */
5066 	if (should_fault_around(vmf)) {
5067 		ret = do_fault_around(vmf);
5068 		if (ret)
5069 			return ret;
5070 	}
5071 
5072 	ret = vmf_can_call_fault(vmf);
5073 	if (ret)
5074 		return ret;
5075 
5076 	ret = __do_fault(vmf);
5077 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5078 		return ret;
5079 
5080 	ret |= finish_fault(vmf);
5081 	folio = page_folio(vmf->page);
5082 	folio_unlock(folio);
5083 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5084 		folio_put(folio);
5085 	return ret;
5086 }
5087 
5088 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
5089 {
5090 	struct vm_area_struct *vma = vmf->vma;
5091 	struct folio *folio;
5092 	vm_fault_t ret;
5093 
5094 	ret = vmf_can_call_fault(vmf);
5095 	if (!ret)
5096 		ret = vmf_anon_prepare(vmf);
5097 	if (ret)
5098 		return ret;
5099 
5100 	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
5101 	if (!folio)
5102 		return VM_FAULT_OOM;
5103 
5104 	vmf->cow_page = &folio->page;
5105 
5106 	ret = __do_fault(vmf);
5107 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5108 		goto uncharge_out;
5109 	if (ret & VM_FAULT_DONE_COW)
5110 		return ret;
5111 
5112 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
5113 	__folio_mark_uptodate(folio);
5114 
5115 	ret |= finish_fault(vmf);
5116 	unlock_page(vmf->page);
5117 	put_page(vmf->page);
5118 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5119 		goto uncharge_out;
5120 	return ret;
5121 uncharge_out:
5122 	folio_put(folio);
5123 	return ret;
5124 }
5125 
5126 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5127 {
5128 	struct vm_area_struct *vma = vmf->vma;
5129 	vm_fault_t ret, tmp;
5130 	struct folio *folio;
5131 
5132 	ret = vmf_can_call_fault(vmf);
5133 	if (ret)
5134 		return ret;
5135 
5136 	ret = __do_fault(vmf);
5137 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5138 		return ret;
5139 
5140 	folio = page_folio(vmf->page);
5141 
5142 	/*
5143 	 * Check if the backing address space wants to know that the page is
5144 	 * about to become writable
5145 	 */
5146 	if (vma->vm_ops->page_mkwrite) {
5147 		folio_unlock(folio);
5148 		tmp = do_page_mkwrite(vmf, folio);
5149 		if (unlikely(!tmp ||
5150 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5151 			folio_put(folio);
5152 			return tmp;
5153 		}
5154 	}
5155 
5156 	ret |= finish_fault(vmf);
5157 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5158 					VM_FAULT_RETRY))) {
5159 		folio_unlock(folio);
5160 		folio_put(folio);
5161 		return ret;
5162 	}
5163 
5164 	ret |= fault_dirty_shared_page(vmf);
5165 	return ret;
5166 }
5167 
5168 /*
5169  * We enter with non-exclusive mmap_lock (to exclude vma changes,
5170  * but allow concurrent faults).
5171  * The mmap_lock may have been released depending on flags and our
5172  * return value.  See filemap_fault() and __folio_lock_or_retry().
5173  * If mmap_lock is released, vma may become invalid (for example
5174  * by other thread calling munmap()).
5175  */
5176 static vm_fault_t do_fault(struct vm_fault *vmf)
5177 {
5178 	struct vm_area_struct *vma = vmf->vma;
5179 	struct mm_struct *vm_mm = vma->vm_mm;
5180 	vm_fault_t ret;
5181 
5182 	/*
5183 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
5184 	 */
5185 	if (!vma->vm_ops->fault) {
5186 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
5187 					       vmf->address, &vmf->ptl);
5188 		if (unlikely(!vmf->pte))
5189 			ret = VM_FAULT_SIGBUS;
5190 		else {
5191 			/*
5192 			 * Make sure this is not a temporary clearing of pte
5193 			 * by holding ptl and checking again. A R/M/W update
5194 			 * of pte involves: take ptl, clearing the pte so that
5195 			 * we don't have concurrent modification by hardware
5196 			 * followed by an update.
5197 			 */
5198 			if (unlikely(pte_none(ptep_get(vmf->pte))))
5199 				ret = VM_FAULT_SIGBUS;
5200 			else
5201 				ret = VM_FAULT_NOPAGE;
5202 
5203 			pte_unmap_unlock(vmf->pte, vmf->ptl);
5204 		}
5205 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
5206 		ret = do_read_fault(vmf);
5207 	else if (!(vma->vm_flags & VM_SHARED))
5208 		ret = do_cow_fault(vmf);
5209 	else
5210 		ret = do_shared_fault(vmf);
5211 
5212 	/* preallocated pagetable is unused: free it */
5213 	if (vmf->prealloc_pte) {
5214 		pte_free(vm_mm, vmf->prealloc_pte);
5215 		vmf->prealloc_pte = NULL;
5216 	}
5217 	return ret;
5218 }
5219 
5220 int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
5221 		      unsigned long addr, int page_nid, int *flags)
5222 {
5223 	struct vm_area_struct *vma = vmf->vma;
5224 
5225 	/* Record the current PID acceesing VMA */
5226 	vma_set_access_pid_bit(vma);
5227 
5228 	count_vm_numa_event(NUMA_HINT_FAULTS);
5229 	if (page_nid == numa_node_id()) {
5230 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5231 		*flags |= TNF_FAULT_LOCAL;
5232 	}
5233 
5234 	return mpol_misplaced(folio, vmf, addr);
5235 }
5236 
5237 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5238 					unsigned long fault_addr, pte_t *fault_pte,
5239 					bool writable)
5240 {
5241 	pte_t pte, old_pte;
5242 
5243 	old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
5244 	pte = pte_modify(old_pte, vma->vm_page_prot);
5245 	pte = pte_mkyoung(pte);
5246 	if (writable)
5247 		pte = pte_mkwrite(pte, vma);
5248 	ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
5249 	update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
5250 }
5251 
5252 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5253 				       struct folio *folio, pte_t fault_pte,
5254 				       bool ignore_writable, bool pte_write_upgrade)
5255 {
5256 	int nr = pte_pfn(fault_pte) - folio_pfn(folio);
5257 	unsigned long start, end, addr = vmf->address;
5258 	unsigned long addr_start = addr - (nr << PAGE_SHIFT);
5259 	unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
5260 	pte_t *start_ptep;
5261 
5262 	/* Stay within the VMA and within the page table. */
5263 	start = max3(addr_start, pt_start, vma->vm_start);
5264 	end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
5265 		   vma->vm_end);
5266 	start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
5267 
5268 	/* Restore all PTEs' mapping of the large folio */
5269 	for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
5270 		pte_t ptent = ptep_get(start_ptep);
5271 		bool writable = false;
5272 
5273 		if (!pte_present(ptent) || !pte_protnone(ptent))
5274 			continue;
5275 
5276 		if (pfn_folio(pte_pfn(ptent)) != folio)
5277 			continue;
5278 
5279 		if (!ignore_writable) {
5280 			ptent = pte_modify(ptent, vma->vm_page_prot);
5281 			writable = pte_write(ptent);
5282 			if (!writable && pte_write_upgrade &&
5283 			    can_change_pte_writable(vma, addr, ptent))
5284 				writable = true;
5285 		}
5286 
5287 		numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
5288 	}
5289 }
5290 
5291 static vm_fault_t do_numa_page(struct vm_fault *vmf)
5292 {
5293 	struct vm_area_struct *vma = vmf->vma;
5294 	struct folio *folio = NULL;
5295 	int nid = NUMA_NO_NODE;
5296 	bool writable = false, ignore_writable = false;
5297 	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
5298 	int last_cpupid;
5299 	int target_nid;
5300 	pte_t pte, old_pte;
5301 	int flags = 0, nr_pages;
5302 
5303 	/*
5304 	 * The pte cannot be used safely until we verify, while holding the page
5305 	 * table lock, that its contents have not changed during fault handling.
5306 	 */
5307 	spin_lock(vmf->ptl);
5308 	/* Read the live PTE from the page tables: */
5309 	old_pte = ptep_get(vmf->pte);
5310 
5311 	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
5312 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5313 		return 0;
5314 	}
5315 
5316 	pte = pte_modify(old_pte, vma->vm_page_prot);
5317 
5318 	/*
5319 	 * Detect now whether the PTE could be writable; this information
5320 	 * is only valid while holding the PT lock.
5321 	 */
5322 	writable = pte_write(pte);
5323 	if (!writable && pte_write_upgrade &&
5324 	    can_change_pte_writable(vma, vmf->address, pte))
5325 		writable = true;
5326 
5327 	folio = vm_normal_folio(vma, vmf->address, pte);
5328 	if (!folio || folio_is_zone_device(folio))
5329 		goto out_map;
5330 
5331 	/*
5332 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5333 	 * much anyway since they can be in shared cache state. This misses
5334 	 * the case where a mapping is writable but the process never writes
5335 	 * to it but pte_write gets cleared during protection updates and
5336 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
5337 	 * background writeback, dirty balancing and application behaviour.
5338 	 */
5339 	if (!writable)
5340 		flags |= TNF_NO_GROUP;
5341 
5342 	/*
5343 	 * Flag if the folio is shared between multiple address spaces. This
5344 	 * is later used when determining whether to group tasks together
5345 	 */
5346 	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
5347 		flags |= TNF_SHARED;
5348 
5349 	nid = folio_nid(folio);
5350 	nr_pages = folio_nr_pages(folio);
5351 	/*
5352 	 * For memory tiering mode, cpupid of slow memory page is used
5353 	 * to record page access time.  So use default value.
5354 	 */
5355 	if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
5356 	    !node_is_toptier(nid))
5357 		last_cpupid = (-1 & LAST_CPUPID_MASK);
5358 	else
5359 		last_cpupid = folio_last_cpupid(folio);
5360 	target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
5361 	if (target_nid == NUMA_NO_NODE)
5362 		goto out_map;
5363 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
5364 		flags |= TNF_MIGRATE_FAIL;
5365 		goto out_map;
5366 	}
5367 	/* The folio is isolated and isolation code holds a folio reference. */
5368 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5369 	writable = false;
5370 	ignore_writable = true;
5371 
5372 	/* Migrate to the requested node */
5373 	if (!migrate_misplaced_folio(folio, vma, target_nid)) {
5374 		nid = target_nid;
5375 		flags |= TNF_MIGRATED;
5376 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
5377 		return 0;
5378 	}
5379 
5380 	flags |= TNF_MIGRATE_FAIL;
5381 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5382 				       vmf->address, &vmf->ptl);
5383 	if (unlikely(!vmf->pte))
5384 		return 0;
5385 	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
5386 		pte_unmap_unlock(vmf->pte, vmf->ptl);
5387 		return 0;
5388 	}
5389 out_map:
5390 	/*
5391 	 * Make it present again, depending on how arch implements
5392 	 * non-accessible ptes, some can allow access by kernel mode.
5393 	 */
5394 	if (folio && folio_test_large(folio))
5395 		numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
5396 					   pte_write_upgrade);
5397 	else
5398 		numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
5399 					    writable);
5400 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5401 
5402 	if (nid != NUMA_NO_NODE)
5403 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
5404 	return 0;
5405 }
5406 
5407 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
5408 {
5409 	struct vm_area_struct *vma = vmf->vma;
5410 	if (vma_is_anonymous(vma))
5411 		return do_huge_pmd_anonymous_page(vmf);
5412 	if (vma->vm_ops->huge_fault)
5413 		return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5414 	return VM_FAULT_FALLBACK;
5415 }
5416 
5417 /* `inline' is required to avoid gcc 4.1.2 build error */
5418 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
5419 {
5420 	struct vm_area_struct *vma = vmf->vma;
5421 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5422 	vm_fault_t ret;
5423 
5424 	if (vma_is_anonymous(vma)) {
5425 		if (likely(!unshare) &&
5426 		    userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
5427 			if (userfaultfd_wp_async(vmf->vma))
5428 				goto split;
5429 			return handle_userfault(vmf, VM_UFFD_WP);
5430 		}
5431 		return do_huge_pmd_wp_page(vmf);
5432 	}
5433 
5434 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5435 		if (vma->vm_ops->huge_fault) {
5436 			ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5437 			if (!(ret & VM_FAULT_FALLBACK))
5438 				return ret;
5439 		}
5440 	}
5441 
5442 split:
5443 	/* COW or write-notify handled on pte level: split pmd. */
5444 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5445 
5446 	return VM_FAULT_FALLBACK;
5447 }
5448 
5449 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
5450 {
5451 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
5452 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5453 	struct vm_area_struct *vma = vmf->vma;
5454 	/* No support for anonymous transparent PUD pages yet */
5455 	if (vma_is_anonymous(vma))
5456 		return VM_FAULT_FALLBACK;
5457 	if (vma->vm_ops->huge_fault)
5458 		return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5459 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5460 	return VM_FAULT_FALLBACK;
5461 }
5462 
5463 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5464 {
5465 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
5466 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5467 	struct vm_area_struct *vma = vmf->vma;
5468 	vm_fault_t ret;
5469 
5470 	/* No support for anonymous transparent PUD pages yet */
5471 	if (vma_is_anonymous(vma))
5472 		goto split;
5473 	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5474 		if (vma->vm_ops->huge_fault) {
5475 			ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5476 			if (!(ret & VM_FAULT_FALLBACK))
5477 				return ret;
5478 		}
5479 	}
5480 split:
5481 	/* COW or write-notify not handled on PUD level: split pud.*/
5482 	__split_huge_pud(vma, vmf->pud, vmf->address);
5483 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5484 	return VM_FAULT_FALLBACK;
5485 }
5486 
5487 /*
5488  * These routines also need to handle stuff like marking pages dirty
5489  * and/or accessed for architectures that don't do it in hardware (most
5490  * RISC architectures).  The early dirtying is also good on the i386.
5491  *
5492  * There is also a hook called "update_mmu_cache()" that architectures
5493  * with external mmu caches can use to update those (ie the Sparc or
5494  * PowerPC hashed page tables that act as extended TLBs).
5495  *
5496  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
5497  * concurrent faults).
5498  *
5499  * The mmap_lock may have been released depending on flags and our return value.
5500  * See filemap_fault() and __folio_lock_or_retry().
5501  */
5502 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
5503 {
5504 	pte_t entry;
5505 
5506 	if (unlikely(pmd_none(*vmf->pmd))) {
5507 		/*
5508 		 * Leave __pte_alloc() until later: because vm_ops->fault may
5509 		 * want to allocate huge page, and if we expose page table
5510 		 * for an instant, it will be difficult to retract from
5511 		 * concurrent faults and from rmap lookups.
5512 		 */
5513 		vmf->pte = NULL;
5514 		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
5515 	} else {
5516 		/*
5517 		 * A regular pmd is established and it can't morph into a huge
5518 		 * pmd by anon khugepaged, since that takes mmap_lock in write
5519 		 * mode; but shmem or file collapse to THP could still morph
5520 		 * it into a huge pmd: just retry later if so.
5521 		 */
5522 		vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
5523 						 vmf->address, &vmf->ptl);
5524 		if (unlikely(!vmf->pte))
5525 			return 0;
5526 		vmf->orig_pte = ptep_get_lockless(vmf->pte);
5527 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
5528 
5529 		if (pte_none(vmf->orig_pte)) {
5530 			pte_unmap(vmf->pte);
5531 			vmf->pte = NULL;
5532 		}
5533 	}
5534 
5535 	if (!vmf->pte)
5536 		return do_pte_missing(vmf);
5537 
5538 	if (!pte_present(vmf->orig_pte))
5539 		return do_swap_page(vmf);
5540 
5541 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5542 		return do_numa_page(vmf);
5543 
5544 	spin_lock(vmf->ptl);
5545 	entry = vmf->orig_pte;
5546 	if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
5547 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
5548 		goto unlock;
5549 	}
5550 	if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5551 		if (!pte_write(entry))
5552 			return do_wp_page(vmf);
5553 		else if (likely(vmf->flags & FAULT_FLAG_WRITE))
5554 			entry = pte_mkdirty(entry);
5555 	}
5556 	entry = pte_mkyoung(entry);
5557 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5558 				vmf->flags & FAULT_FLAG_WRITE)) {
5559 		update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5560 				vmf->pte, 1);
5561 	} else {
5562 		/* Skip spurious TLB flush for retried page fault */
5563 		if (vmf->flags & FAULT_FLAG_TRIED)
5564 			goto unlock;
5565 		/*
5566 		 * This is needed only for protection faults but the arch code
5567 		 * is not yet telling us if this is a protection fault or not.
5568 		 * This still avoids useless tlb flushes for .text page faults
5569 		 * with threads.
5570 		 */
5571 		if (vmf->flags & FAULT_FLAG_WRITE)
5572 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5573 						     vmf->pte);
5574 	}
5575 unlock:
5576 	pte_unmap_unlock(vmf->pte, vmf->ptl);
5577 	return 0;
5578 }
5579 
5580 /*
5581  * On entry, we hold either the VMA lock or the mmap_lock
5582  * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in
5583  * the result, the mmap_lock is not held on exit.  See filemap_fault()
5584  * and __folio_lock_or_retry().
5585  */
5586 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5587 		unsigned long address, unsigned int flags)
5588 {
5589 	struct vm_fault vmf = {
5590 		.vma = vma,
5591 		.address = address & PAGE_MASK,
5592 		.real_address = address,
5593 		.flags = flags,
5594 		.pgoff = linear_page_index(vma, address),
5595 		.gfp_mask = __get_fault_gfp_mask(vma),
5596 	};
5597 	struct mm_struct *mm = vma->vm_mm;
5598 	unsigned long vm_flags = vma->vm_flags;
5599 	pgd_t *pgd;
5600 	p4d_t *p4d;
5601 	vm_fault_t ret;
5602 
5603 	pgd = pgd_offset(mm, address);
5604 	p4d = p4d_alloc(mm, pgd, address);
5605 	if (!p4d)
5606 		return VM_FAULT_OOM;
5607 
5608 	vmf.pud = pud_alloc(mm, p4d, address);
5609 	if (!vmf.pud)
5610 		return VM_FAULT_OOM;
5611 retry_pud:
5612 	if (pud_none(*vmf.pud) &&
5613 	    thp_vma_allowable_order(vma, vm_flags,
5614 				TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
5615 		ret = create_huge_pud(&vmf);
5616 		if (!(ret & VM_FAULT_FALLBACK))
5617 			return ret;
5618 	} else {
5619 		pud_t orig_pud = *vmf.pud;
5620 
5621 		barrier();
5622 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5623 
5624 			/*
5625 			 * TODO once we support anonymous PUDs: NUMA case and
5626 			 * FAULT_FLAG_UNSHARE handling.
5627 			 */
5628 			if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
5629 				ret = wp_huge_pud(&vmf, orig_pud);
5630 				if (!(ret & VM_FAULT_FALLBACK))
5631 					return ret;
5632 			} else {
5633 				huge_pud_set_accessed(&vmf, orig_pud);
5634 				return 0;
5635 			}
5636 		}
5637 	}
5638 
5639 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5640 	if (!vmf.pmd)
5641 		return VM_FAULT_OOM;
5642 
5643 	/* Huge pud page fault raced with pmd_alloc? */
5644 	if (pud_trans_unstable(vmf.pud))
5645 		goto retry_pud;
5646 
5647 	if (pmd_none(*vmf.pmd) &&
5648 	    thp_vma_allowable_order(vma, vm_flags,
5649 				TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
5650 		ret = create_huge_pmd(&vmf);
5651 		if (!(ret & VM_FAULT_FALLBACK))
5652 			return ret;
5653 	} else {
5654 		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
5655 
5656 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
5657 			VM_BUG_ON(thp_migration_supported() &&
5658 					  !is_pmd_migration_entry(vmf.orig_pmd));
5659 			if (is_pmd_migration_entry(vmf.orig_pmd))
5660 				pmd_migration_entry_wait(mm, vmf.pmd);
5661 			return 0;
5662 		}
5663 		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5664 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5665 				return do_huge_pmd_numa_page(&vmf);
5666 
5667 			if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5668 			    !pmd_write(vmf.orig_pmd)) {
5669 				ret = wp_huge_pmd(&vmf);
5670 				if (!(ret & VM_FAULT_FALLBACK))
5671 					return ret;
5672 			} else {
5673 				huge_pmd_set_accessed(&vmf);
5674 				return 0;
5675 			}
5676 		}
5677 	}
5678 
5679 	return handle_pte_fault(&vmf);
5680 }
5681 
5682 /**
5683  * mm_account_fault - Do page fault accounting
5684  * @mm: mm from which memcg should be extracted. It can be NULL.
5685  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
5686  *        of perf event counters, but we'll still do the per-task accounting to
5687  *        the task who triggered this page fault.
5688  * @address: the faulted address.
5689  * @flags: the fault flags.
5690  * @ret: the fault retcode.
5691  *
5692  * This will take care of most of the page fault accounting.  Meanwhile, it
5693  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
5694  * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
5695  * still be in per-arch page fault handlers at the entry of page fault.
5696  */
5697 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
5698 				    unsigned long address, unsigned int flags,
5699 				    vm_fault_t ret)
5700 {
5701 	bool major;
5702 
5703 	/* Incomplete faults will be accounted upon completion. */
5704 	if (ret & VM_FAULT_RETRY)
5705 		return;
5706 
5707 	/*
5708 	 * To preserve the behavior of older kernels, PGFAULT counters record
5709 	 * both successful and failed faults, as opposed to perf counters,
5710 	 * which ignore failed cases.
5711 	 */
5712 	count_vm_event(PGFAULT);
5713 	count_memcg_event_mm(mm, PGFAULT);
5714 
5715 	/*
5716 	 * Do not account for unsuccessful faults (e.g. when the address wasn't
5717 	 * valid).  That includes arch_vma_access_permitted() failing before
5718 	 * reaching here. So this is not a "this many hardware page faults"
5719 	 * counter.  We should use the hw profiling for that.
5720 	 */
5721 	if (ret & VM_FAULT_ERROR)
5722 		return;
5723 
5724 	/*
5725 	 * We define the fault as a major fault when the final successful fault
5726 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
5727 	 * handle it immediately previously).
5728 	 */
5729 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
5730 
5731 	if (major)
5732 		current->maj_flt++;
5733 	else
5734 		current->min_flt++;
5735 
5736 	/*
5737 	 * If the fault is done for GUP, regs will be NULL.  We only do the
5738 	 * accounting for the per thread fault counters who triggered the
5739 	 * fault, and we skip the perf event updates.
5740 	 */
5741 	if (!regs)
5742 		return;
5743 
5744 	if (major)
5745 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
5746 	else
5747 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
5748 }
5749 
5750 #ifdef CONFIG_LRU_GEN
5751 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5752 {
5753 	/* the LRU algorithm only applies to accesses with recency */
5754 	current->in_lru_fault = vma_has_recency(vma);
5755 }
5756 
5757 static void lru_gen_exit_fault(void)
5758 {
5759 	current->in_lru_fault = false;
5760 }
5761 #else
5762 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5763 {
5764 }
5765 
5766 static void lru_gen_exit_fault(void)
5767 {
5768 }
5769 #endif /* CONFIG_LRU_GEN */
5770 
5771 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
5772 				       unsigned int *flags)
5773 {
5774 	if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
5775 		if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
5776 			return VM_FAULT_SIGSEGV;
5777 		/*
5778 		 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
5779 		 * just treat it like an ordinary read-fault otherwise.
5780 		 */
5781 		if (!is_cow_mapping(vma->vm_flags))
5782 			*flags &= ~FAULT_FLAG_UNSHARE;
5783 	} else if (*flags & FAULT_FLAG_WRITE) {
5784 		/* Write faults on read-only mappings are impossible ... */
5785 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
5786 			return VM_FAULT_SIGSEGV;
5787 		/* ... and FOLL_FORCE only applies to COW mappings. */
5788 		if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
5789 				 !is_cow_mapping(vma->vm_flags)))
5790 			return VM_FAULT_SIGSEGV;
5791 	}
5792 #ifdef CONFIG_PER_VMA_LOCK
5793 	/*
5794 	 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
5795 	 * the assumption that lock is dropped on VM_FAULT_RETRY.
5796 	 */
5797 	if (WARN_ON_ONCE((*flags &
5798 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
5799 			(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
5800 		return VM_FAULT_SIGSEGV;
5801 #endif
5802 
5803 	return 0;
5804 }
5805 
5806 /*
5807  * By the time we get here, we already hold the mm semaphore
5808  *
5809  * The mmap_lock may have been released depending on flags and our
5810  * return value.  See filemap_fault() and __folio_lock_or_retry().
5811  */
5812 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
5813 			   unsigned int flags, struct pt_regs *regs)
5814 {
5815 	/* If the fault handler drops the mmap_lock, vma may be freed */
5816 	struct mm_struct *mm = vma->vm_mm;
5817 	vm_fault_t ret;
5818 	bool is_droppable;
5819 
5820 	__set_current_state(TASK_RUNNING);
5821 
5822 	ret = sanitize_fault_flags(vma, &flags);
5823 	if (ret)
5824 		goto out;
5825 
5826 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5827 					    flags & FAULT_FLAG_INSTRUCTION,
5828 					    flags & FAULT_FLAG_REMOTE)) {
5829 		ret = VM_FAULT_SIGSEGV;
5830 		goto out;
5831 	}
5832 
5833 	is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
5834 
5835 	/*
5836 	 * Enable the memcg OOM handling for faults triggered in user
5837 	 * space.  Kernel faults are handled more gracefully.
5838 	 */
5839 	if (flags & FAULT_FLAG_USER)
5840 		mem_cgroup_enter_user_fault();
5841 
5842 	lru_gen_enter_fault(vma);
5843 
5844 	if (unlikely(is_vm_hugetlb_page(vma)))
5845 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5846 	else
5847 		ret = __handle_mm_fault(vma, address, flags);
5848 
5849 	/*
5850 	 * Warning: It is no longer safe to dereference vma-> after this point,
5851 	 * because mmap_lock might have been dropped by __handle_mm_fault(), so
5852 	 * vma might be destroyed from underneath us.
5853 	 */
5854 
5855 	lru_gen_exit_fault();
5856 
5857 	/* If the mapping is droppable, then errors due to OOM aren't fatal. */
5858 	if (is_droppable)
5859 		ret &= ~VM_FAULT_OOM;
5860 
5861 	if (flags & FAULT_FLAG_USER) {
5862 		mem_cgroup_exit_user_fault();
5863 		/*
5864 		 * The task may have entered a memcg OOM situation but
5865 		 * if the allocation error was handled gracefully (no
5866 		 * VM_FAULT_OOM), there is no need to kill anything.
5867 		 * Just clean up the OOM state peacefully.
5868 		 */
5869 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5870 			mem_cgroup_oom_synchronize(false);
5871 	}
5872 out:
5873 	mm_account_fault(mm, regs, address, flags, ret);
5874 
5875 	return ret;
5876 }
5877 EXPORT_SYMBOL_GPL(handle_mm_fault);
5878 
5879 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA
5880 #include <linux/extable.h>
5881 
5882 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5883 {
5884 	if (likely(mmap_read_trylock(mm)))
5885 		return true;
5886 
5887 	if (regs && !user_mode(regs)) {
5888 		unsigned long ip = exception_ip(regs);
5889 		if (!search_exception_tables(ip))
5890 			return false;
5891 	}
5892 
5893 	return !mmap_read_lock_killable(mm);
5894 }
5895 
5896 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
5897 {
5898 	/*
5899 	 * We don't have this operation yet.
5900 	 *
5901 	 * It should be easy enough to do: it's basically a
5902 	 *    atomic_long_try_cmpxchg_acquire()
5903 	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
5904 	 * it also needs the proper lockdep magic etc.
5905 	 */
5906 	return false;
5907 }
5908 
5909 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5910 {
5911 	mmap_read_unlock(mm);
5912 	if (regs && !user_mode(regs)) {
5913 		unsigned long ip = exception_ip(regs);
5914 		if (!search_exception_tables(ip))
5915 			return false;
5916 	}
5917 	return !mmap_write_lock_killable(mm);
5918 }
5919 
5920 /*
5921  * Helper for page fault handling.
5922  *
5923  * This is kind of equivalend to "mmap_read_lock()" followed
5924  * by "find_extend_vma()", except it's a lot more careful about
5925  * the locking (and will drop the lock on failure).
5926  *
5927  * For example, if we have a kernel bug that causes a page
5928  * fault, we don't want to just use mmap_read_lock() to get
5929  * the mm lock, because that would deadlock if the bug were
5930  * to happen while we're holding the mm lock for writing.
5931  *
5932  * So this checks the exception tables on kernel faults in
5933  * order to only do this all for instructions that are actually
5934  * expected to fault.
5935  *
5936  * We can also actually take the mm lock for writing if we
5937  * need to extend the vma, which helps the VM layer a lot.
5938  */
5939 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
5940 			unsigned long addr, struct pt_regs *regs)
5941 {
5942 	struct vm_area_struct *vma;
5943 
5944 	if (!get_mmap_lock_carefully(mm, regs))
5945 		return NULL;
5946 
5947 	vma = find_vma(mm, addr);
5948 	if (likely(vma && (vma->vm_start <= addr)))
5949 		return vma;
5950 
5951 	/*
5952 	 * Well, dang. We might still be successful, but only
5953 	 * if we can extend a vma to do so.
5954 	 */
5955 	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
5956 		mmap_read_unlock(mm);
5957 		return NULL;
5958 	}
5959 
5960 	/*
5961 	 * We can try to upgrade the mmap lock atomically,
5962 	 * in which case we can continue to use the vma
5963 	 * we already looked up.
5964 	 *
5965 	 * Otherwise we'll have to drop the mmap lock and
5966 	 * re-take it, and also look up the vma again,
5967 	 * re-checking it.
5968 	 */
5969 	if (!mmap_upgrade_trylock(mm)) {
5970 		if (!upgrade_mmap_lock_carefully(mm, regs))
5971 			return NULL;
5972 
5973 		vma = find_vma(mm, addr);
5974 		if (!vma)
5975 			goto fail;
5976 		if (vma->vm_start <= addr)
5977 			goto success;
5978 		if (!(vma->vm_flags & VM_GROWSDOWN))
5979 			goto fail;
5980 	}
5981 
5982 	if (expand_stack_locked(vma, addr))
5983 		goto fail;
5984 
5985 success:
5986 	mmap_write_downgrade(mm);
5987 	return vma;
5988 
5989 fail:
5990 	mmap_write_unlock(mm);
5991 	return NULL;
5992 }
5993 #endif
5994 
5995 #ifdef CONFIG_PER_VMA_LOCK
5996 /*
5997  * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
5998  * stable and not isolated. If the VMA is not found or is being modified the
5999  * function returns NULL.
6000  */
6001 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
6002 					  unsigned long address)
6003 {
6004 	MA_STATE(mas, &mm->mm_mt, address, address);
6005 	struct vm_area_struct *vma;
6006 
6007 	rcu_read_lock();
6008 retry:
6009 	vma = mas_walk(&mas);
6010 	if (!vma)
6011 		goto inval;
6012 
6013 	if (!vma_start_read(vma))
6014 		goto inval;
6015 
6016 	/* Check since vm_start/vm_end might change before we lock the VMA */
6017 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6018 		goto inval_end_read;
6019 
6020 	/* Check if the VMA got isolated after we found it */
6021 	if (vma->detached) {
6022 		vma_end_read(vma);
6023 		count_vm_vma_lock_event(VMA_LOCK_MISS);
6024 		/* The area was replaced with another one */
6025 		goto retry;
6026 	}
6027 
6028 	rcu_read_unlock();
6029 	return vma;
6030 
6031 inval_end_read:
6032 	vma_end_read(vma);
6033 inval:
6034 	rcu_read_unlock();
6035 	count_vm_vma_lock_event(VMA_LOCK_ABORT);
6036 	return NULL;
6037 }
6038 #endif /* CONFIG_PER_VMA_LOCK */
6039 
6040 #ifndef __PAGETABLE_P4D_FOLDED
6041 /*
6042  * Allocate p4d page table.
6043  * We've already handled the fast-path in-line.
6044  */
6045 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6046 {
6047 	p4d_t *new = p4d_alloc_one(mm, address);
6048 	if (!new)
6049 		return -ENOMEM;
6050 
6051 	spin_lock(&mm->page_table_lock);
6052 	if (pgd_present(*pgd)) {	/* Another has populated it */
6053 		p4d_free(mm, new);
6054 	} else {
6055 		smp_wmb(); /* See comment in pmd_install() */
6056 		pgd_populate(mm, pgd, new);
6057 	}
6058 	spin_unlock(&mm->page_table_lock);
6059 	return 0;
6060 }
6061 #endif /* __PAGETABLE_P4D_FOLDED */
6062 
6063 #ifndef __PAGETABLE_PUD_FOLDED
6064 /*
6065  * Allocate page upper directory.
6066  * We've already handled the fast-path in-line.
6067  */
6068 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6069 {
6070 	pud_t *new = pud_alloc_one(mm, address);
6071 	if (!new)
6072 		return -ENOMEM;
6073 
6074 	spin_lock(&mm->page_table_lock);
6075 	if (!p4d_present(*p4d)) {
6076 		mm_inc_nr_puds(mm);
6077 		smp_wmb(); /* See comment in pmd_install() */
6078 		p4d_populate(mm, p4d, new);
6079 	} else	/* Another has populated it */
6080 		pud_free(mm, new);
6081 	spin_unlock(&mm->page_table_lock);
6082 	return 0;
6083 }
6084 #endif /* __PAGETABLE_PUD_FOLDED */
6085 
6086 #ifndef __PAGETABLE_PMD_FOLDED
6087 /*
6088  * Allocate page middle directory.
6089  * We've already handled the fast-path in-line.
6090  */
6091 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6092 {
6093 	spinlock_t *ptl;
6094 	pmd_t *new = pmd_alloc_one(mm, address);
6095 	if (!new)
6096 		return -ENOMEM;
6097 
6098 	ptl = pud_lock(mm, pud);
6099 	if (!pud_present(*pud)) {
6100 		mm_inc_nr_pmds(mm);
6101 		smp_wmb(); /* See comment in pmd_install() */
6102 		pud_populate(mm, pud, new);
6103 	} else {	/* Another has populated it */
6104 		pmd_free(mm, new);
6105 	}
6106 	spin_unlock(ptl);
6107 	return 0;
6108 }
6109 #endif /* __PAGETABLE_PMD_FOLDED */
6110 
6111 /**
6112  * follow_pte - look up PTE at a user virtual address
6113  * @vma: the memory mapping
6114  * @address: user virtual address
6115  * @ptepp: location to store found PTE
6116  * @ptlp: location to store the lock for the PTE
6117  *
6118  * On a successful return, the pointer to the PTE is stored in @ptepp;
6119  * the corresponding lock is taken and its location is stored in @ptlp.
6120  *
6121  * The contents of the PTE are only stable until @ptlp is released using
6122  * pte_unmap_unlock(). This function will fail if the PTE is non-present.
6123  * Present PTEs may include PTEs that map refcounted pages, such as
6124  * anonymous folios in COW mappings.
6125  *
6126  * Callers must be careful when relying on PTE content after
6127  * pte_unmap_unlock(). Especially if the PTE maps a refcounted page,
6128  * callers must protect against invalidation with MMU notifiers; otherwise
6129  * access to the PFN at a later point in time can trigger use-after-free.
6130  *
6131  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
6132  * should be taken for read.
6133  *
6134  * This function must not be used to modify PTE content.
6135  *
6136  * Return: zero on success, -ve otherwise.
6137  */
6138 int follow_pte(struct vm_area_struct *vma, unsigned long address,
6139 	       pte_t **ptepp, spinlock_t **ptlp)
6140 {
6141 	struct mm_struct *mm = vma->vm_mm;
6142 	pgd_t *pgd;
6143 	p4d_t *p4d;
6144 	pud_t *pud;
6145 	pmd_t *pmd;
6146 	pte_t *ptep;
6147 
6148 	mmap_assert_locked(mm);
6149 	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6150 		goto out;
6151 
6152 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6153 		goto out;
6154 
6155 	pgd = pgd_offset(mm, address);
6156 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
6157 		goto out;
6158 
6159 	p4d = p4d_offset(pgd, address);
6160 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
6161 		goto out;
6162 
6163 	pud = pud_offset(p4d, address);
6164 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
6165 		goto out;
6166 
6167 	pmd = pmd_offset(pud, address);
6168 	VM_BUG_ON(pmd_trans_huge(*pmd));
6169 
6170 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
6171 	if (!ptep)
6172 		goto out;
6173 	if (!pte_present(ptep_get(ptep)))
6174 		goto unlock;
6175 	*ptepp = ptep;
6176 	return 0;
6177 unlock:
6178 	pte_unmap_unlock(ptep, *ptlp);
6179 out:
6180 	return -EINVAL;
6181 }
6182 EXPORT_SYMBOL_GPL(follow_pte);
6183 
6184 #ifdef CONFIG_HAVE_IOREMAP_PROT
6185 /**
6186  * generic_access_phys - generic implementation for iomem mmap access
6187  * @vma: the vma to access
6188  * @addr: userspace address, not relative offset within @vma
6189  * @buf: buffer to read/write
6190  * @len: length of transfer
6191  * @write: set to FOLL_WRITE when writing, otherwise reading
6192  *
6193  * This is a generic implementation for &vm_operations_struct.access for an
6194  * iomem mapping. This callback is used by access_process_vm() when the @vma is
6195  * not page based.
6196  */
6197 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6198 			void *buf, int len, int write)
6199 {
6200 	resource_size_t phys_addr;
6201 	unsigned long prot = 0;
6202 	void __iomem *maddr;
6203 	pte_t *ptep, pte;
6204 	spinlock_t *ptl;
6205 	int offset = offset_in_page(addr);
6206 	int ret = -EINVAL;
6207 
6208 retry:
6209 	if (follow_pte(vma, addr, &ptep, &ptl))
6210 		return -EINVAL;
6211 	pte = ptep_get(ptep);
6212 	pte_unmap_unlock(ptep, ptl);
6213 
6214 	prot = pgprot_val(pte_pgprot(pte));
6215 	phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
6216 
6217 	if ((write & FOLL_WRITE) && !pte_write(pte))
6218 		return -EINVAL;
6219 
6220 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6221 	if (!maddr)
6222 		return -ENOMEM;
6223 
6224 	if (follow_pte(vma, addr, &ptep, &ptl))
6225 		goto out_unmap;
6226 
6227 	if (!pte_same(pte, ptep_get(ptep))) {
6228 		pte_unmap_unlock(ptep, ptl);
6229 		iounmap(maddr);
6230 
6231 		goto retry;
6232 	}
6233 
6234 	if (write)
6235 		memcpy_toio(maddr + offset, buf, len);
6236 	else
6237 		memcpy_fromio(buf, maddr + offset, len);
6238 	ret = len;
6239 	pte_unmap_unlock(ptep, ptl);
6240 out_unmap:
6241 	iounmap(maddr);
6242 
6243 	return ret;
6244 }
6245 EXPORT_SYMBOL_GPL(generic_access_phys);
6246 #endif
6247 
6248 /*
6249  * Access another process' address space as given in mm.
6250  */
6251 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
6252 			      void *buf, int len, unsigned int gup_flags)
6253 {
6254 	void *old_buf = buf;
6255 	int write = gup_flags & FOLL_WRITE;
6256 
6257 	if (mmap_read_lock_killable(mm))
6258 		return 0;
6259 
6260 	/* Untag the address before looking up the VMA */
6261 	addr = untagged_addr_remote(mm, addr);
6262 
6263 	/* Avoid triggering the temporary warning in __get_user_pages */
6264 	if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
6265 		return 0;
6266 
6267 	/* ignore errors, just check how much was successfully transferred */
6268 	while (len) {
6269 		int bytes, offset;
6270 		void *maddr;
6271 		struct vm_area_struct *vma = NULL;
6272 		struct page *page = get_user_page_vma_remote(mm, addr,
6273 							     gup_flags, &vma);
6274 
6275 		if (IS_ERR(page)) {
6276 			/* We might need to expand the stack to access it */
6277 			vma = vma_lookup(mm, addr);
6278 			if (!vma) {
6279 				vma = expand_stack(mm, addr);
6280 
6281 				/* mmap_lock was dropped on failure */
6282 				if (!vma)
6283 					return buf - old_buf;
6284 
6285 				/* Try again if stack expansion worked */
6286 				continue;
6287 			}
6288 
6289 			/*
6290 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
6291 			 * we can access using slightly different code.
6292 			 */
6293 			bytes = 0;
6294 #ifdef CONFIG_HAVE_IOREMAP_PROT
6295 			if (vma->vm_ops && vma->vm_ops->access)
6296 				bytes = vma->vm_ops->access(vma, addr, buf,
6297 							    len, write);
6298 #endif
6299 			if (bytes <= 0)
6300 				break;
6301 		} else {
6302 			bytes = len;
6303 			offset = addr & (PAGE_SIZE-1);
6304 			if (bytes > PAGE_SIZE-offset)
6305 				bytes = PAGE_SIZE-offset;
6306 
6307 			maddr = kmap_local_page(page);
6308 			if (write) {
6309 				copy_to_user_page(vma, page, addr,
6310 						  maddr + offset, buf, bytes);
6311 				set_page_dirty_lock(page);
6312 			} else {
6313 				copy_from_user_page(vma, page, addr,
6314 						    buf, maddr + offset, bytes);
6315 			}
6316 			unmap_and_put_page(page, maddr);
6317 		}
6318 		len -= bytes;
6319 		buf += bytes;
6320 		addr += bytes;
6321 	}
6322 	mmap_read_unlock(mm);
6323 
6324 	return buf - old_buf;
6325 }
6326 
6327 /**
6328  * access_remote_vm - access another process' address space
6329  * @mm:		the mm_struct of the target address space
6330  * @addr:	start address to access
6331  * @buf:	source or destination buffer
6332  * @len:	number of bytes to transfer
6333  * @gup_flags:	flags modifying lookup behaviour
6334  *
6335  * The caller must hold a reference on @mm.
6336  *
6337  * Return: number of bytes copied from source to destination.
6338  */
6339 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6340 		void *buf, int len, unsigned int gup_flags)
6341 {
6342 	return __access_remote_vm(mm, addr, buf, len, gup_flags);
6343 }
6344 
6345 /*
6346  * Access another process' address space.
6347  * Source/target buffer must be kernel space,
6348  * Do not walk the page table directly, use get_user_pages
6349  */
6350 int access_process_vm(struct task_struct *tsk, unsigned long addr,
6351 		void *buf, int len, unsigned int gup_flags)
6352 {
6353 	struct mm_struct *mm;
6354 	int ret;
6355 
6356 	mm = get_task_mm(tsk);
6357 	if (!mm)
6358 		return 0;
6359 
6360 	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
6361 
6362 	mmput(mm);
6363 
6364 	return ret;
6365 }
6366 EXPORT_SYMBOL_GPL(access_process_vm);
6367 
6368 /*
6369  * Print the name of a VMA.
6370  */
6371 void print_vma_addr(char *prefix, unsigned long ip)
6372 {
6373 	struct mm_struct *mm = current->mm;
6374 	struct vm_area_struct *vma;
6375 
6376 	/*
6377 	 * we might be running from an atomic context so we cannot sleep
6378 	 */
6379 	if (!mmap_read_trylock(mm))
6380 		return;
6381 
6382 	vma = vma_lookup(mm, ip);
6383 	if (vma && vma->vm_file) {
6384 		struct file *f = vma->vm_file;
6385 		ip -= vma->vm_start;
6386 		ip += vma->vm_pgoff << PAGE_SHIFT;
6387 		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
6388 				vma->vm_start,
6389 				vma->vm_end - vma->vm_start);
6390 	}
6391 	mmap_read_unlock(mm);
6392 }
6393 
6394 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6395 void __might_fault(const char *file, int line)
6396 {
6397 	if (pagefault_disabled())
6398 		return;
6399 	__might_sleep(file, line);
6400 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
6401 	if (current->mm)
6402 		might_lock_read(&current->mm->mmap_lock);
6403 #endif
6404 }
6405 EXPORT_SYMBOL(__might_fault);
6406 #endif
6407 
6408 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
6409 /*
6410  * Process all subpages of the specified huge page with the specified
6411  * operation.  The target subpage will be processed last to keep its
6412  * cache lines hot.
6413  */
6414 static inline int process_huge_page(
6415 	unsigned long addr_hint, unsigned int nr_pages,
6416 	int (*process_subpage)(unsigned long addr, int idx, void *arg),
6417 	void *arg)
6418 {
6419 	int i, n, base, l, ret;
6420 	unsigned long addr = addr_hint &
6421 		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
6422 
6423 	/* Process target subpage last to keep its cache lines hot */
6424 	might_sleep();
6425 	n = (addr_hint - addr) / PAGE_SIZE;
6426 	if (2 * n <= nr_pages) {
6427 		/* If target subpage in first half of huge page */
6428 		base = 0;
6429 		l = n;
6430 		/* Process subpages at the end of huge page */
6431 		for (i = nr_pages - 1; i >= 2 * n; i--) {
6432 			cond_resched();
6433 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
6434 			if (ret)
6435 				return ret;
6436 		}
6437 	} else {
6438 		/* If target subpage in second half of huge page */
6439 		base = nr_pages - 2 * (nr_pages - n);
6440 		l = nr_pages - n;
6441 		/* Process subpages at the begin of huge page */
6442 		for (i = 0; i < base; i++) {
6443 			cond_resched();
6444 			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
6445 			if (ret)
6446 				return ret;
6447 		}
6448 	}
6449 	/*
6450 	 * Process remaining subpages in left-right-left-right pattern
6451 	 * towards the target subpage
6452 	 */
6453 	for (i = 0; i < l; i++) {
6454 		int left_idx = base + i;
6455 		int right_idx = base + 2 * l - 1 - i;
6456 
6457 		cond_resched();
6458 		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
6459 		if (ret)
6460 			return ret;
6461 		cond_resched();
6462 		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
6463 		if (ret)
6464 			return ret;
6465 	}
6466 	return 0;
6467 }
6468 
6469 static void clear_gigantic_page(struct folio *folio, unsigned long addr,
6470 				unsigned int nr_pages)
6471 {
6472 	int i;
6473 
6474 	might_sleep();
6475 	for (i = 0; i < nr_pages; i++) {
6476 		cond_resched();
6477 		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
6478 	}
6479 }
6480 
6481 static int clear_subpage(unsigned long addr, int idx, void *arg)
6482 {
6483 	struct folio *folio = arg;
6484 
6485 	clear_user_highpage(folio_page(folio, idx), addr);
6486 	return 0;
6487 }
6488 
6489 /**
6490  * folio_zero_user - Zero a folio which will be mapped to userspace.
6491  * @folio: The folio to zero.
6492  * @addr_hint: The address will be accessed or the base address if uncelar.
6493  */
6494 void folio_zero_user(struct folio *folio, unsigned long addr_hint)
6495 {
6496 	unsigned int nr_pages = folio_nr_pages(folio);
6497 
6498 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
6499 		clear_gigantic_page(folio, addr_hint, nr_pages);
6500 	else
6501 		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
6502 }
6503 
6504 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
6505 				   unsigned long addr,
6506 				   struct vm_area_struct *vma,
6507 				   unsigned int nr_pages)
6508 {
6509 	int i;
6510 	struct page *dst_page;
6511 	struct page *src_page;
6512 
6513 	for (i = 0; i < nr_pages; i++) {
6514 		dst_page = folio_page(dst, i);
6515 		src_page = folio_page(src, i);
6516 
6517 		cond_resched();
6518 		if (copy_mc_user_highpage(dst_page, src_page,
6519 					  addr + i*PAGE_SIZE, vma))
6520 			return -EHWPOISON;
6521 	}
6522 	return 0;
6523 }
6524 
6525 struct copy_subpage_arg {
6526 	struct folio *dst;
6527 	struct folio *src;
6528 	struct vm_area_struct *vma;
6529 };
6530 
6531 static int copy_subpage(unsigned long addr, int idx, void *arg)
6532 {
6533 	struct copy_subpage_arg *copy_arg = arg;
6534 	struct page *dst = folio_page(copy_arg->dst, idx);
6535 	struct page *src = folio_page(copy_arg->src, idx);
6536 
6537 	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
6538 		return -EHWPOISON;
6539 	return 0;
6540 }
6541 
6542 int copy_user_large_folio(struct folio *dst, struct folio *src,
6543 			  unsigned long addr_hint, struct vm_area_struct *vma)
6544 {
6545 	unsigned int nr_pages = folio_nr_pages(dst);
6546 	struct copy_subpage_arg arg = {
6547 		.dst = dst,
6548 		.src = src,
6549 		.vma = vma,
6550 	};
6551 
6552 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
6553 		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
6554 
6555 	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
6556 }
6557 
6558 long copy_folio_from_user(struct folio *dst_folio,
6559 			   const void __user *usr_src,
6560 			   bool allow_pagefault)
6561 {
6562 	void *kaddr;
6563 	unsigned long i, rc = 0;
6564 	unsigned int nr_pages = folio_nr_pages(dst_folio);
6565 	unsigned long ret_val = nr_pages * PAGE_SIZE;
6566 	struct page *subpage;
6567 
6568 	for (i = 0; i < nr_pages; i++) {
6569 		subpage = folio_page(dst_folio, i);
6570 		kaddr = kmap_local_page(subpage);
6571 		if (!allow_pagefault)
6572 			pagefault_disable();
6573 		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
6574 		if (!allow_pagefault)
6575 			pagefault_enable();
6576 		kunmap_local(kaddr);
6577 
6578 		ret_val -= (PAGE_SIZE - rc);
6579 		if (rc)
6580 			break;
6581 
6582 		flush_dcache_page(subpage);
6583 
6584 		cond_resched();
6585 	}
6586 	return ret_val;
6587 }
6588 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
6589 
6590 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
6591 
6592 static struct kmem_cache *page_ptl_cachep;
6593 
6594 void __init ptlock_cache_init(void)
6595 {
6596 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
6597 			SLAB_PANIC, NULL);
6598 }
6599 
6600 bool ptlock_alloc(struct ptdesc *ptdesc)
6601 {
6602 	spinlock_t *ptl;
6603 
6604 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
6605 	if (!ptl)
6606 		return false;
6607 	ptdesc->ptl = ptl;
6608 	return true;
6609 }
6610 
6611 void ptlock_free(struct ptdesc *ptdesc)
6612 {
6613 	kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
6614 }
6615 #endif
6616 
6617 void vma_pgtable_walk_begin(struct vm_area_struct *vma)
6618 {
6619 	if (is_vm_hugetlb_page(vma))
6620 		hugetlb_vma_lock_read(vma);
6621 }
6622 
6623 void vma_pgtable_walk_end(struct vm_area_struct *vma)
6624 {
6625 	if (is_vm_hugetlb_page(vma))
6626 		hugetlb_vma_unlock_read(vma);
6627 }
6628