xref: /linux/mm/huge_memory.c (revision 247dbcdbf790c52fc76cf8e327cd0a5778e41e66)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2009  Red Hat, Inc.
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/coredump.h>
12 #include <linux/sched/numa_balancing.h>
13 #include <linux/highmem.h>
14 #include <linux/hugetlb.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/shrinker.h>
19 #include <linux/mm_inline.h>
20 #include <linux/swapops.h>
21 #include <linux/backing-dev.h>
22 #include <linux/dax.h>
23 #include <linux/khugepaged.h>
24 #include <linux/freezer.h>
25 #include <linux/pfn_t.h>
26 #include <linux/mman.h>
27 #include <linux/memremap.h>
28 #include <linux/pagemap.h>
29 #include <linux/debugfs.h>
30 #include <linux/migrate.h>
31 #include <linux/hashtable.h>
32 #include <linux/userfaultfd_k.h>
33 #include <linux/page_idle.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/oom.h>
36 #include <linux/numa.h>
37 #include <linux/page_owner.h>
38 #include <linux/sched/sysctl.h>
39 #include <linux/memory-tiers.h>
40 
41 #include <asm/tlb.h>
42 #include <asm/pgalloc.h>
43 #include "internal.h"
44 #include "swap.h"
45 
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/thp.h>
48 
49 /*
50  * By default, transparent hugepage support is disabled in order to avoid
51  * risking an increased memory footprint for applications that are not
52  * guaranteed to benefit from it. When transparent hugepage support is
53  * enabled, it is for all mappings, and khugepaged scans all mappings.
54  * Defrag is invoked by khugepaged hugepage allocations and by page faults
55  * for all hugepage allocations.
56  */
57 unsigned long transparent_hugepage_flags __read_mostly =
58 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
59 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
60 #endif
61 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
62 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
63 #endif
64 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
65 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
66 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
67 
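/*
 * Illustrative sketch (hypothetical helper, not part of this file): policy
 * checks against these flags reduce to plain bit tests, e.g. "madvise" mode
 * is encoded as REQ_MADV set while the global flag is clear:
 *
 *	static bool thp_madvise_only(void)
 *	{
 *		return !test_bit(TRANSPARENT_HUGEPAGE_FLAG,
 *				 &transparent_hugepage_flags) &&
 *		       test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 *				&transparent_hugepage_flags);
 *	}
 *
 * The sysfs handlers further down keep at most one of the two enable bits
 * set (or neither, for "never").
 */
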
68 static struct shrinker *deferred_split_shrinker;
69 static unsigned long deferred_split_count(struct shrinker *shrink,
70 					  struct shrink_control *sc);
71 static unsigned long deferred_split_scan(struct shrinker *shrink,
72 					 struct shrink_control *sc);
73 
74 static atomic_t huge_zero_refcount;
75 struct page *huge_zero_page __read_mostly;
76 unsigned long huge_zero_pfn __read_mostly = ~0UL;
77 
78 bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
79 			bool smaps, bool in_pf, bool enforce_sysfs)
80 {
81 	if (!vma->vm_mm)		/* vdso */
82 		return false;
83 
84 	/*
85 	 * THP can be explicitly disabled through madvise or prctl; some
86 	 * architectures also disable it for certain mappings, for
87 	 * example, s390 KVM guest mappings.
88 	 */
89 	if ((vm_flags & VM_NOHUGEPAGE) ||
90 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
91 		return false;
92 	/*
93 	 * Bail out if the hardware/firmware has marked hugepage support disabled.
94 	 */
95 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
96 		return false;
97 
98 	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
99 	if (vma_is_dax(vma))
100 		return in_pf;
101 
102 	/*
103 	 * Special VMAs and hugetlb VMAs are excluded here.
104 	 * This must be checked after dax, since some dax mappings may have
105 	 * VM_MIXEDMAP set.
106 	 */
107 	if (vm_flags & VM_NO_KHUGEPAGED)
108 		return false;
109 
110 	/*
111 	 * Check alignment for file vma and size for both file and anon vma.
112 	 *
113 	 * Skip the check for page faults: huge faults do the check in the
114 	 * fault handlers, and this check is not suitable for huge PUD faults.
115 	 */
116 	if (!in_pf &&
117 	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
118 		return false;
119 
120 	/*
121 	 * Enabled via shmem mount options or sysfs settings.
122 	 * Must be done before hugepage flags check since shmem has its
123 	 * own flags.
124 	 */
125 	if (!in_pf && shmem_file(vma->vm_file))
126 		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
127 				     !enforce_sysfs, vma->vm_mm, vm_flags);
128 
129 	/* Enforce sysfs THP requirements as necessary */
130 	if (enforce_sysfs &&
131 	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
132 					   !hugepage_flags_always())))
133 		return false;
134 
135 	/* Only regular files are valid */
136 	if (!in_pf && file_thp_enabled(vma))
137 		return true;
138 
139 	if (!vma_is_anonymous(vma))
140 		return false;
141 
142 	if (vma_is_temporary_stack(vma))
143 		return false;
144 
145 	/*
146 	 * The THPeligible bit in smaps should show 1 for eligible VMAs even
147 	 * though anon_vma is not initialized yet.
148 	 *
149 	 * Allow page faults, since anon_vma may not be initialized until
150 	 * the first page fault.
151 	 */
152 	if (!vma->anon_vma)
153 		return (smaps || in_pf);
154 
155 	return true;
156 }
157 
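/*
 * Illustrative sketch (hypothetical caller, not part of this file): a fault
 * path would gate PMD mapping on the check above roughly like
 *
 *	if (pmd_none(*vmf->pmd) &&
 *	    hugepage_vma_check(vma, vma->vm_flags, false, true, true))
 *		ret = do_huge_pmd_anonymous_page(vmf);
 *
 * smaps=false and in_pf=true select the page-fault rules (DAX VMAs pass,
 * the alignment check is skipped), and enforce_sysfs=true honours the
 * global "enabled" setting.
 */
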
158 static bool get_huge_zero_page(void)
159 {
160 	struct page *zero_page;
161 retry:
162 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
163 		return true;
164 
165 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
166 			HPAGE_PMD_ORDER);
167 	if (!zero_page) {
168 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
169 		return false;
170 	}
171 	preempt_disable();
172 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
173 		preempt_enable();
174 		__free_pages(zero_page, compound_order(zero_page));
175 		goto retry;
176 	}
177 	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
178 
179 	/* We take an additional reference here; the shrinker will put it back */
180 	atomic_set(&huge_zero_refcount, 2);
181 	preempt_enable();
182 	count_vm_event(THP_ZERO_PAGE_ALLOC);
183 	return true;
184 }
185 
186 static void put_huge_zero_page(void)
187 {
188 	/*
189 	 * The counter should never reach zero here. Only the shrinker can
190 	 * put the last reference.
191 	 */
192 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
193 }
194 
195 struct page *mm_get_huge_zero_page(struct mm_struct *mm)
196 {
197 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
198 		return READ_ONCE(huge_zero_page);
199 
200 	if (!get_huge_zero_page())
201 		return NULL;
202 
203 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
204 		put_huge_zero_page();
205 
206 	return READ_ONCE(huge_zero_page);
207 }
208 
209 void mm_put_huge_zero_page(struct mm_struct *mm)
210 {
211 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
212 		put_huge_zero_page();
213 }
214 
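/*
 * Assumed usage (sketch, not part of this file): the huge zero page is
 * shared, with at most one reference per mm tracked via MMF_HUGE_ZERO_PAGE:
 *
 *	struct page *zp = mm_get_huge_zero_page(mm); // first call takes a ref
 *	...map zp into any number of zero PMDs...
 *	mm_put_huge_zero_page(mm);                   // e.g. on mm teardown
 *
 * The extra reference taken in get_huge_zero_page() keeps the page cached;
 * only the shrinker below drops that last reference under memory pressure.
 */
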
215 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
216 					struct shrink_control *sc)
217 {
218 	/* we can free the zero page only if the last reference remains */
219 	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
220 }
221 
222 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
223 				       struct shrink_control *sc)
224 {
225 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
226 		struct page *zero_page = xchg(&huge_zero_page, NULL);
227 		BUG_ON(zero_page == NULL);
228 		WRITE_ONCE(huge_zero_pfn, ~0UL);
229 		__free_pages(zero_page, compound_order(zero_page));
230 		return HPAGE_PMD_NR;
231 	}
232 
233 	return 0;
234 }
235 
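/*
 * Worked example (x86-64 with 4KiB base pages, shown for illustration):
 * HPAGE_PMD_ORDER is 9, so HPAGE_PMD_NR = 1 << 9 = 512 and the count
 * callback reports 512 reclaimable pages (2MiB) once only the cached
 * reference remains (refcount == 1).  The 1 -> 0 atomic_cmpxchg() in the
 * scan callback guarantees that exactly one CPU frees the page.
 */
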
236 static struct shrinker *huge_zero_page_shrinker;
237 
238 #ifdef CONFIG_SYSFS
239 static ssize_t enabled_show(struct kobject *kobj,
240 			    struct kobj_attribute *attr, char *buf)
241 {
242 	const char *output;
243 
244 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
245 		output = "[always] madvise never";
246 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
247 			  &transparent_hugepage_flags))
248 		output = "always [madvise] never";
249 	else
250 		output = "always madvise [never]";
251 
252 	return sysfs_emit(buf, "%s\n", output);
253 }
254 
255 static ssize_t enabled_store(struct kobject *kobj,
256 			     struct kobj_attribute *attr,
257 			     const char *buf, size_t count)
258 {
259 	ssize_t ret = count;
260 
261 	if (sysfs_streq(buf, "always")) {
262 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
263 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
264 	} else if (sysfs_streq(buf, "madvise")) {
265 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
266 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
267 	} else if (sysfs_streq(buf, "never")) {
268 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
269 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
270 	} else
271 		ret = -EINVAL;
272 
273 	if (ret > 0) {
274 		int err = start_stop_khugepaged();
275 		if (err)
276 			ret = err;
277 	}
278 	return ret;
279 }
280 
281 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
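
/*
 * Illustrative userspace view (assumed shell session): the handlers above
 * back /sys/kernel/mm/transparent_hugepage/enabled, so the mode is switched
 * and read back like any sysfs attribute:
 *
 *	# echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	# cat /sys/kernel/mm/transparent_hugepage/enabled
 *	always [madvise] never
 *
 * A successful store also calls start_stop_khugepaged() so the daemon is
 * started or parked to match the new mode.
 */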
282 
283 ssize_t single_hugepage_flag_show(struct kobject *kobj,
284 				  struct kobj_attribute *attr, char *buf,
285 				  enum transparent_hugepage_flag flag)
286 {
287 	return sysfs_emit(buf, "%d\n",
288 			  !!test_bit(flag, &transparent_hugepage_flags));
289 }
290 
291 ssize_t single_hugepage_flag_store(struct kobject *kobj,
292 				 struct kobj_attribute *attr,
293 				 const char *buf, size_t count,
294 				 enum transparent_hugepage_flag flag)
295 {
296 	unsigned long value;
297 	int ret;
298 
299 	ret = kstrtoul(buf, 10, &value);
300 	if (ret < 0)
301 		return ret;
302 	if (value > 1)
303 		return -EINVAL;
304 
305 	if (value)
306 		set_bit(flag, &transparent_hugepage_flags);
307 	else
308 		clear_bit(flag, &transparent_hugepage_flags);
309 
310 	return count;
311 }
312 
313 static ssize_t defrag_show(struct kobject *kobj,
314 			   struct kobj_attribute *attr, char *buf)
315 {
316 	const char *output;
317 
318 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
319 		     &transparent_hugepage_flags))
320 		output = "[always] defer defer+madvise madvise never";
321 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
322 			  &transparent_hugepage_flags))
323 		output = "always [defer] defer+madvise madvise never";
324 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
325 			  &transparent_hugepage_flags))
326 		output = "always defer [defer+madvise] madvise never";
327 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
328 			  &transparent_hugepage_flags))
329 		output = "always defer defer+madvise [madvise] never";
330 	else
331 		output = "always defer defer+madvise madvise [never]";
332 
333 	return sysfs_emit(buf, "%s\n", output);
334 }
335 
336 static ssize_t defrag_store(struct kobject *kobj,
337 			    struct kobj_attribute *attr,
338 			    const char *buf, size_t count)
339 {
340 	if (sysfs_streq(buf, "always")) {
341 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
342 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
343 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
344 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
345 	} else if (sysfs_streq(buf, "defer+madvise")) {
346 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
347 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
348 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
349 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
350 	} else if (sysfs_streq(buf, "defer")) {
351 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
352 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
353 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
354 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
355 	} else if (sysfs_streq(buf, "madvise")) {
356 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
357 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
358 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
359 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
360 	} else if (sysfs_streq(buf, "never")) {
361 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
362 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
363 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
364 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
365 	} else
366 		return -EINVAL;
367 
368 	return count;
369 }
370 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
371 
372 static ssize_t use_zero_page_show(struct kobject *kobj,
373 				  struct kobj_attribute *attr, char *buf)
374 {
375 	return single_hugepage_flag_show(kobj, attr, buf,
376 					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
377 }
378 static ssize_t use_zero_page_store(struct kobject *kobj,
379 		struct kobj_attribute *attr, const char *buf, size_t count)
380 {
381 	return single_hugepage_flag_store(kobj, attr, buf, count,
382 				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
383 }
384 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
385 
386 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
387 				   struct kobj_attribute *attr, char *buf)
388 {
389 	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
390 }
391 static struct kobj_attribute hpage_pmd_size_attr =
392 	__ATTR_RO(hpage_pmd_size);
393 
394 static struct attribute *hugepage_attr[] = {
395 	&enabled_attr.attr,
396 	&defrag_attr.attr,
397 	&use_zero_page_attr.attr,
398 	&hpage_pmd_size_attr.attr,
399 #ifdef CONFIG_SHMEM
400 	&shmem_enabled_attr.attr,
401 #endif
402 	NULL,
403 };
404 
405 static const struct attribute_group hugepage_attr_group = {
406 	.attrs = hugepage_attr,
407 };
408 
409 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
410 {
411 	int err;
412 
413 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
414 	if (unlikely(!*hugepage_kobj)) {
415 		pr_err("failed to create transparent hugepage kobject\n");
416 		return -ENOMEM;
417 	}
418 
419 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
420 	if (err) {
421 		pr_err("failed to register transparent hugepage group\n");
422 		goto delete_obj;
423 	}
424 
425 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
426 	if (err) {
427 		pr_err("failed to register transparent hugepage group\n");
428 		goto remove_hp_group;
429 	}
430 
431 	return 0;
432 
433 remove_hp_group:
434 	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
435 delete_obj:
436 	kobject_put(*hugepage_kobj);
437 	return err;
438 }
439 
440 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
441 {
442 	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
443 	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
444 	kobject_put(hugepage_kobj);
445 }
446 #else
447 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
448 {
449 	return 0;
450 }
451 
452 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
453 {
454 }
455 #endif /* CONFIG_SYSFS */
456 
457 static int __init thp_shrinker_init(void)
458 {
459 	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
460 	if (!huge_zero_page_shrinker)
461 		return -ENOMEM;
462 
463 	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
464 						 SHRINKER_MEMCG_AWARE |
465 						 SHRINKER_NONSLAB,
466 						 "thp-deferred_split");
467 	if (!deferred_split_shrinker) {
468 		shrinker_free(huge_zero_page_shrinker);
469 		return -ENOMEM;
470 	}
471 
472 	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
473 	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
474 	shrinker_register(huge_zero_page_shrinker);
475 
476 	deferred_split_shrinker->count_objects = deferred_split_count;
477 	deferred_split_shrinker->scan_objects = deferred_split_scan;
478 	shrinker_register(deferred_split_shrinker);
479 
480 	return 0;
481 }
482 
483 static void __init thp_shrinker_exit(void)
484 {
485 	shrinker_free(huge_zero_page_shrinker);
486 	shrinker_free(deferred_split_shrinker);
487 }
488 
489 static int __init hugepage_init(void)
490 {
491 	int err;
492 	struct kobject *hugepage_kobj;
493 
494 	if (!has_transparent_hugepage()) {
495 		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
496 		return -EINVAL;
497 	}
498 
499 	/*
500 	 * hugepages must be allocatable by the buddy allocator
501 	 */
502 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
503 	/*
504 	 * we use page->mapping and page->index in the second tail page
505 	 * as a list_head; this assumes THP order >= 2
506 	 */
507 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
508 
509 	err = hugepage_init_sysfs(&hugepage_kobj);
510 	if (err)
511 		goto err_sysfs;
512 
513 	err = khugepaged_init();
514 	if (err)
515 		goto err_slab;
516 
517 	err = thp_shrinker_init();
518 	if (err)
519 		goto err_shrinker;
520 
521 	/*
522 	 * By default disable transparent hugepages on smaller systems,
523 	 * where the extra memory used could hurt more than TLB overhead
524 	 * is likely to save.  The admin can still enable it through /sys.
525 	 */
526 	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
527 		transparent_hugepage_flags = 0;
528 		return 0;
529 	}
530 
531 	err = start_stop_khugepaged();
532 	if (err)
533 		goto err_khugepaged;
534 
535 	return 0;
536 err_khugepaged:
537 	thp_shrinker_exit();
538 err_shrinker:
539 	khugepaged_destroy();
540 err_slab:
541 	hugepage_exit_sysfs(hugepage_kobj);
542 err_sysfs:
543 	return err;
544 }
545 subsys_initcall(hugepage_init);
546 
547 static int __init setup_transparent_hugepage(char *str)
548 {
549 	int ret = 0;
550 	if (!str)
551 		goto out;
552 	if (!strcmp(str, "always")) {
553 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
554 			&transparent_hugepage_flags);
555 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
556 			  &transparent_hugepage_flags);
557 		ret = 1;
558 	} else if (!strcmp(str, "madvise")) {
559 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
560 			  &transparent_hugepage_flags);
561 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
562 			&transparent_hugepage_flags);
563 		ret = 1;
564 	} else if (!strcmp(str, "never")) {
565 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
566 			  &transparent_hugepage_flags);
567 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
568 			  &transparent_hugepage_flags);
569 		ret = 1;
570 	}
571 out:
572 	if (!ret)
573 		pr_warn("transparent_hugepage= cannot parse, ignored\n");
574 	return ret;
575 }
576 __setup("transparent_hugepage=", setup_transparent_hugepage);
577 
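/*
 * Boot-time example (equivalent to the sysfs store above): booting with
 * "transparent_hugepage=madvise" on the kernel command line clears
 * TRANSPARENT_HUGEPAGE_FLAG and sets TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 * the same state as "echo madvise > .../transparent_hugepage/enabled".
 */
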
578 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
579 {
580 	if (likely(vma->vm_flags & VM_WRITE))
581 		pmd = pmd_mkwrite(pmd, vma);
582 	return pmd;
583 }
584 
585 #ifdef CONFIG_MEMCG
586 static inline
587 struct deferred_split *get_deferred_split_queue(struct folio *folio)
588 {
589 	struct mem_cgroup *memcg = folio_memcg(folio);
590 	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
591 
592 	if (memcg)
593 		return &memcg->deferred_split_queue;
594 	else
595 		return &pgdat->deferred_split_queue;
596 }
597 #else
598 static inline
599 struct deferred_split *get_deferred_split_queue(struct folio *folio)
600 {
601 	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
602 
603 	return &pgdat->deferred_split_queue;
604 }
605 #endif
606 
607 void folio_prep_large_rmappable(struct folio *folio)
608 {
609 	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
610 	INIT_LIST_HEAD(&folio->_deferred_list);
611 	folio_set_large_rmappable(folio);
612 }
613 
614 static inline bool is_transparent_hugepage(struct folio *folio)
615 {
616 	if (!folio_test_large(folio))
617 		return false;
618 
619 	return is_huge_zero_page(&folio->page) ||
620 		folio_test_large_rmappable(folio);
621 }
622 
623 static unsigned long __thp_get_unmapped_area(struct file *filp,
624 		unsigned long addr, unsigned long len,
625 		loff_t off, unsigned long flags, unsigned long size)
626 {
627 	loff_t off_end = off + len;
628 	loff_t off_align = round_up(off, size);
629 	unsigned long len_pad, ret;
630 
631 	if (off_end <= off_align || (off_end - off_align) < size)
632 		return 0;
633 
634 	len_pad = len + size;
635 	if (len_pad < len || (off + len_pad) < off)
636 		return 0;
637 
638 	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
639 					      off >> PAGE_SHIFT, flags);
640 
641 	/*
642 	 * The failure might be due to length padding. The caller will retry
643 	 * without the padding.
644 	 */
645 	if (IS_ERR_VALUE(ret))
646 		return 0;
647 
648 	/*
649 	 * Do not try to align to THP boundary if allocation at the address
650 	 * hint succeeds.
651 	 */
652 	if (ret == addr)
653 		return addr;
654 
655 	ret += (off - ret) & (size - 1);
656 	return ret;
657 }
658 
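/*
 * Worked example (illustrative numbers, assuming size == PMD_SIZE == 2MiB,
 * so size - 1 == 0x1fffff): for off = 0x201000 and a padded search that
 * returned ret = 0x7f0000100000,
 *
 *	ret += (off - ret) & (size - 1);
 *		// (0x201000 - 0x7f0000100000) & 0x1fffff == 0x101000
 *		// ret becomes 0x7f0000201000
 *
 * after which ret and off agree modulo PMD_SIZE (both end in 0x001000), so
 * page-cache pages at PMD-aligned offsets can be mapped with huge PMDs.
 * The len_pad slack requested above guarantees the bumped address still
 * lies inside the region the search returned.
 */
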
659 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
660 		unsigned long len, unsigned long pgoff, unsigned long flags)
661 {
662 	unsigned long ret;
663 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
664 
665 	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
666 	if (ret)
667 		return ret;
668 
669 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
670 }
671 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
672 
673 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
674 			struct page *page, gfp_t gfp)
675 {
676 	struct vm_area_struct *vma = vmf->vma;
677 	struct folio *folio = page_folio(page);
678 	pgtable_t pgtable;
679 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
680 	vm_fault_t ret = 0;
681 
682 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
683 
684 	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
685 		folio_put(folio);
686 		count_vm_event(THP_FAULT_FALLBACK);
687 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
688 		return VM_FAULT_FALLBACK;
689 	}
690 	folio_throttle_swaprate(folio, gfp);
691 
692 	pgtable = pte_alloc_one(vma->vm_mm);
693 	if (unlikely(!pgtable)) {
694 		ret = VM_FAULT_OOM;
695 		goto release;
696 	}
697 
698 	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
699 	/*
700 	 * The memory barrier inside __folio_mark_uptodate makes sure that
701 	 * clear_huge_page writes become visible before the set_pmd_at()
702 	 * write.
703 	 */
704 	__folio_mark_uptodate(folio);
705 
706 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
707 	if (unlikely(!pmd_none(*vmf->pmd))) {
708 		goto unlock_release;
709 	} else {
710 		pmd_t entry;
711 
712 		ret = check_stable_address_space(vma->vm_mm);
713 		if (ret)
714 			goto unlock_release;
715 
716 		/* Deliver the page fault to userland */
717 		if (userfaultfd_missing(vma)) {
718 			spin_unlock(vmf->ptl);
719 			folio_put(folio);
720 			pte_free(vma->vm_mm, pgtable);
721 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
722 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
723 			return ret;
724 		}
725 
726 		entry = mk_huge_pmd(page, vma->vm_page_prot);
727 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
728 		folio_add_new_anon_rmap(folio, vma, haddr);
729 		folio_add_lru_vma(folio, vma);
730 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
731 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
732 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
733 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
734 		mm_inc_nr_ptes(vma->vm_mm);
735 		spin_unlock(vmf->ptl);
736 		count_vm_event(THP_FAULT_ALLOC);
737 		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
738 	}
739 
740 	return 0;
741 unlock_release:
742 	spin_unlock(vmf->ptl);
743 release:
744 	if (pgtable)
745 		pte_free(vma->vm_mm, pgtable);
746 	folio_put(folio);
747 	return ret;
748 
749 }
750 
751 /*
752  * always: directly stall for all thp allocations
753  * defer: wake kswapd and fail if not immediately available
754  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
755  *		  fail if not immediately available
756  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
757  *	    available
758  * never: never stall for any thp allocation
759  */
760 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
761 {
762 	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
763 
764 	/* Always do synchronous compaction */
765 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
766 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
767 
768 	/* Kick kcompactd and fail quickly */
769 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
770 		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
771 
772 	/* Synchronous compaction if madvised, otherwise kick kcompactd */
773 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
774 		return GFP_TRANSHUGE_LIGHT |
775 			(vma_madvised ? __GFP_DIRECT_RECLAIM :
776 					__GFP_KSWAPD_RECLAIM);
777 
778 	/* Only do synchronous compaction if madvised */
779 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
780 		return GFP_TRANSHUGE_LIGHT |
781 		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
782 
783 	return GFP_TRANSHUGE_LIGHT;
784 }
785 
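/*
 * Worked example (values follow from the branches above, shown for
 * illustration): in "defer+madvise" mode a VM_HUGEPAGE vma gets
 *
 *	GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM
 *
 * (synchronous compaction), while a non-madvised vma gets
 *
 *	GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM
 *
 * which only kicks kcompactd and fails fast when no huge page is free.
 */
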
786 /* Caller must hold page table lock. */
787 static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
788 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
789 		struct page *zero_page)
790 {
791 	pmd_t entry;
792 	if (!pmd_none(*pmd))
793 		return;
794 	entry = mk_pmd(zero_page, vma->vm_page_prot);
795 	entry = pmd_mkhuge(entry);
796 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
797 	set_pmd_at(mm, haddr, pmd, entry);
798 	mm_inc_nr_ptes(mm);
799 }
800 
801 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
802 {
803 	struct vm_area_struct *vma = vmf->vma;
804 	gfp_t gfp;
805 	struct folio *folio;
806 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
807 
808 	if (!transhuge_vma_suitable(vma, haddr))
809 		return VM_FAULT_FALLBACK;
810 	if (unlikely(anon_vma_prepare(vma)))
811 		return VM_FAULT_OOM;
812 	khugepaged_enter_vma(vma, vma->vm_flags);
813 
814 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
815 			!mm_forbids_zeropage(vma->vm_mm) &&
816 			transparent_hugepage_use_zero_page()) {
817 		pgtable_t pgtable;
818 		struct page *zero_page;
819 		vm_fault_t ret;
820 		pgtable = pte_alloc_one(vma->vm_mm);
821 		if (unlikely(!pgtable))
822 			return VM_FAULT_OOM;
823 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
824 		if (unlikely(!zero_page)) {
825 			pte_free(vma->vm_mm, pgtable);
826 			count_vm_event(THP_FAULT_FALLBACK);
827 			return VM_FAULT_FALLBACK;
828 		}
829 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
830 		ret = 0;
831 		if (pmd_none(*vmf->pmd)) {
832 			ret = check_stable_address_space(vma->vm_mm);
833 			if (ret) {
834 				spin_unlock(vmf->ptl);
835 				pte_free(vma->vm_mm, pgtable);
836 			} else if (userfaultfd_missing(vma)) {
837 				spin_unlock(vmf->ptl);
838 				pte_free(vma->vm_mm, pgtable);
839 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
840 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
841 			} else {
842 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
843 						   haddr, vmf->pmd, zero_page);
844 				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
845 				spin_unlock(vmf->ptl);
846 			}
847 		} else {
848 			spin_unlock(vmf->ptl);
849 			pte_free(vma->vm_mm, pgtable);
850 		}
851 		return ret;
852 	}
853 	gfp = vma_thp_gfp_mask(vma);
854 	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
855 	if (unlikely(!folio)) {
856 		count_vm_event(THP_FAULT_FALLBACK);
857 		return VM_FAULT_FALLBACK;
858 	}
859 	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
860 }
861 
862 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
863 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
864 		pgtable_t pgtable)
865 {
866 	struct mm_struct *mm = vma->vm_mm;
867 	pmd_t entry;
868 	spinlock_t *ptl;
869 
870 	ptl = pmd_lock(mm, pmd);
871 	if (!pmd_none(*pmd)) {
872 		if (write) {
873 			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
874 				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
875 				goto out_unlock;
876 			}
877 			entry = pmd_mkyoung(*pmd);
878 			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
879 			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
880 				update_mmu_cache_pmd(vma, addr, pmd);
881 		}
882 
883 		goto out_unlock;
884 	}
885 
886 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
887 	if (pfn_t_devmap(pfn))
888 		entry = pmd_mkdevmap(entry);
889 	if (write) {
890 		entry = pmd_mkyoung(pmd_mkdirty(entry));
891 		entry = maybe_pmd_mkwrite(entry, vma);
892 	}
893 
894 	if (pgtable) {
895 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
896 		mm_inc_nr_ptes(mm);
897 		pgtable = NULL;
898 	}
899 
900 	set_pmd_at(mm, addr, pmd, entry);
901 	update_mmu_cache_pmd(vma, addr, pmd);
902 
903 out_unlock:
904 	spin_unlock(ptl);
905 	if (pgtable)
906 		pte_free(mm, pgtable);
907 }
908 
909 /**
910  * vmf_insert_pfn_pmd - insert a pmd size pfn
911  * @vmf: Structure describing the fault
912  * @pfn: pfn to insert
913  * @write: whether it's a write fault
914  *
915  * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
916  *
917  * Return: vm_fault_t value.
918  */
919 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
920 {
921 	unsigned long addr = vmf->address & PMD_MASK;
922 	struct vm_area_struct *vma = vmf->vma;
923 	pgprot_t pgprot = vma->vm_page_prot;
924 	pgtable_t pgtable = NULL;
925 
926 	/*
927 	 * If we had pmd_special, we could avoid all these restrictions,
928 	 * but we need to be consistent with PTEs and architectures that
929 	 * can't support a 'special' bit.
930 	 */
931 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
932 			!pfn_t_devmap(pfn));
933 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
934 						(VM_PFNMAP|VM_MIXEDMAP));
935 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
936 
937 	if (addr < vma->vm_start || addr >= vma->vm_end)
938 		return VM_FAULT_SIGBUS;
939 
940 	if (arch_needs_pgtable_deposit()) {
941 		pgtable = pte_alloc_one(vma->vm_mm);
942 		if (!pgtable)
943 			return VM_FAULT_OOM;
944 	}
945 
946 	track_pfn_insert(vma, &pgprot, pfn);
947 
948 	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
949 	return VM_FAULT_NOPAGE;
950 }
951 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
952 
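/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * driver with a VM_PFNMAP vma can service a PMD-sized fault by resolving
 * its own pfn and calling the helper above:
 *
 *	static vm_fault_t demo_pmd_fault(struct vm_fault *vmf)
 *	{
 *		// demo_pgoff_to_pfn() is a made-up lookup for this sketch
 *		pfn_t pfn = demo_pgoff_to_pfn(vmf->pgoff);
 *
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 *
 * On success the helper returns VM_FAULT_NOPAGE; an address outside the
 * vma yields VM_FAULT_SIGBUS, as checked above.
 */
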
953 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
954 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
955 {
956 	if (likely(vma->vm_flags & VM_WRITE))
957 		pud = pud_mkwrite(pud);
958 	return pud;
959 }
960 
961 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
962 		pud_t *pud, pfn_t pfn, bool write)
963 {
964 	struct mm_struct *mm = vma->vm_mm;
965 	pgprot_t prot = vma->vm_page_prot;
966 	pud_t entry;
967 	spinlock_t *ptl;
968 
969 	ptl = pud_lock(mm, pud);
970 	if (!pud_none(*pud)) {
971 		if (write) {
972 			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
973 				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
974 				goto out_unlock;
975 			}
976 			entry = pud_mkyoung(*pud);
977 			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
978 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
979 				update_mmu_cache_pud(vma, addr, pud);
980 		}
981 		goto out_unlock;
982 	}
983 
984 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
985 	if (pfn_t_devmap(pfn))
986 		entry = pud_mkdevmap(entry);
987 	if (write) {
988 		entry = pud_mkyoung(pud_mkdirty(entry));
989 		entry = maybe_pud_mkwrite(entry, vma);
990 	}
991 	set_pud_at(mm, addr, pud, entry);
992 	update_mmu_cache_pud(vma, addr, pud);
993 
994 out_unlock:
995 	spin_unlock(ptl);
996 }
997 
998 /**
999  * vmf_insert_pfn_pud - insert a pud size pfn
1000  * @vmf: Structure describing the fault
1001  * @pfn: pfn to insert
1002  * @write: whether it's a write fault
1003  *
1004  * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1005  *
1006  * Return: vm_fault_t value.
1007  */
1008 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
1009 {
1010 	unsigned long addr = vmf->address & PUD_MASK;
1011 	struct vm_area_struct *vma = vmf->vma;
1012 	pgprot_t pgprot = vma->vm_page_prot;
1013 
1014 	/*
1015 	 * If we had pud_special, we could avoid all these restrictions,
1016 	 * but we need to be consistent with PTEs and architectures that
1017 	 * can't support a 'special' bit.
1018 	 */
1019 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1020 			!pfn_t_devmap(pfn));
1021 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1022 						(VM_PFNMAP|VM_MIXEDMAP));
1023 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1024 
1025 	if (addr < vma->vm_start || addr >= vma->vm_end)
1026 		return VM_FAULT_SIGBUS;
1027 
1028 	track_pfn_insert(vma, &pgprot, pfn);
1029 
1030 	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
1031 	return VM_FAULT_NOPAGE;
1032 }
1033 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1034 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1035 
1036 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1037 		      pmd_t *pmd, bool write)
1038 {
1039 	pmd_t _pmd;
1040 
1041 	_pmd = pmd_mkyoung(*pmd);
1042 	if (write)
1043 		_pmd = pmd_mkdirty(_pmd);
1044 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1045 				  pmd, _pmd, write))
1046 		update_mmu_cache_pmd(vma, addr, pmd);
1047 }
1048 
1049 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1050 		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1051 {
1052 	unsigned long pfn = pmd_pfn(*pmd);
1053 	struct mm_struct *mm = vma->vm_mm;
1054 	struct page *page;
1055 	int ret;
1056 
1057 	assert_spin_locked(pmd_lockptr(mm, pmd));
1058 
1059 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
1060 		return NULL;
1061 
1062 	if (pmd_present(*pmd) && pmd_devmap(*pmd))
1063 		/* pass */;
1064 	else
1065 		return NULL;
1066 
1067 	if (flags & FOLL_TOUCH)
1068 		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1069 
1070 	/*
1071 	 * device mapped pages can only be returned if the
1072 	 * caller will manage the page reference count.
1073 	 */
1074 	if (!(flags & (FOLL_GET | FOLL_PIN)))
1075 		return ERR_PTR(-EEXIST);
1076 
1077 	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1078 	*pgmap = get_dev_pagemap(pfn, *pgmap);
1079 	if (!*pgmap)
1080 		return ERR_PTR(-EFAULT);
1081 	page = pfn_to_page(pfn);
1082 	ret = try_grab_page(page, flags);
1083 	if (ret)
1084 		page = ERR_PTR(ret);
1085 
1086 	return page;
1087 }
1088 
1089 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1090 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1091 		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1092 {
1093 	spinlock_t *dst_ptl, *src_ptl;
1094 	struct page *src_page;
1095 	pmd_t pmd;
1096 	pgtable_t pgtable = NULL;
1097 	int ret = -ENOMEM;
1098 
1099 	/* Skip if it can be re-filled on fault */
1100 	if (!vma_is_anonymous(dst_vma))
1101 		return 0;
1102 
1103 	pgtable = pte_alloc_one(dst_mm);
1104 	if (unlikely(!pgtable))
1105 		goto out;
1106 
1107 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
1108 	src_ptl = pmd_lockptr(src_mm, src_pmd);
1109 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1110 
1111 	ret = -EAGAIN;
1112 	pmd = *src_pmd;
1113 
1114 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1115 	if (unlikely(is_swap_pmd(pmd))) {
1116 		swp_entry_t entry = pmd_to_swp_entry(pmd);
1117 
1118 		VM_BUG_ON(!is_pmd_migration_entry(pmd));
1119 		if (!is_readable_migration_entry(entry)) {
1120 			entry = make_readable_migration_entry(
1121 							swp_offset(entry));
1122 			pmd = swp_entry_to_pmd(entry);
1123 			if (pmd_swp_soft_dirty(*src_pmd))
1124 				pmd = pmd_swp_mksoft_dirty(pmd);
1125 			if (pmd_swp_uffd_wp(*src_pmd))
1126 				pmd = pmd_swp_mkuffd_wp(pmd);
1127 			set_pmd_at(src_mm, addr, src_pmd, pmd);
1128 		}
1129 		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1130 		mm_inc_nr_ptes(dst_mm);
1131 		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1132 		if (!userfaultfd_wp(dst_vma))
1133 			pmd = pmd_swp_clear_uffd_wp(pmd);
1134 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1135 		ret = 0;
1136 		goto out_unlock;
1137 	}
1138 #endif
1139 
1140 	if (unlikely(!pmd_trans_huge(pmd))) {
1141 		pte_free(dst_mm, pgtable);
1142 		goto out_unlock;
1143 	}
1144 	/*
1145 	 * While the page table lock is held, the huge zero pmd cannot be
1146 	 * under splitting, since we never split the page itself, only the
1147 	 * pmd into a page table.
1148 	 */
1149 	if (is_huge_zero_pmd(pmd)) {
1150 		/*
1151 		 * get_huge_zero_page() will never allocate a new page here,
1152 		 * since we already have a zero page to copy. It just takes a
1153 		 * reference.
1154 		 */
1155 		mm_get_huge_zero_page(dst_mm);
1156 		goto out_zero_page;
1157 	}
1158 
1159 	src_page = pmd_page(pmd);
1160 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1161 
1162 	get_page(src_page);
1163 	if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
1164 		/* Page may be pinned: split and retry the fault on PTEs. */
1165 		put_page(src_page);
1166 		pte_free(dst_mm, pgtable);
1167 		spin_unlock(src_ptl);
1168 		spin_unlock(dst_ptl);
1169 		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
1170 		return -EAGAIN;
1171 	}
1172 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1173 out_zero_page:
1174 	mm_inc_nr_ptes(dst_mm);
1175 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1176 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
1177 	if (!userfaultfd_wp(dst_vma))
1178 		pmd = pmd_clear_uffd_wp(pmd);
1179 	pmd = pmd_mkold(pmd_wrprotect(pmd));
1180 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1181 
1182 	ret = 0;
1183 out_unlock:
1184 	spin_unlock(src_ptl);
1185 	spin_unlock(dst_ptl);
1186 out:
1187 	return ret;
1188 }
1189 
1190 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1191 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1192 		      pud_t *pud, bool write)
1193 {
1194 	pud_t _pud;
1195 
1196 	_pud = pud_mkyoung(*pud);
1197 	if (write)
1198 		_pud = pud_mkdirty(_pud);
1199 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1200 				  pud, _pud, write))
1201 		update_mmu_cache_pud(vma, addr, pud);
1202 }
1203 
1204 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1205 		pud_t *pud, int flags, struct dev_pagemap **pgmap)
1206 {
1207 	unsigned long pfn = pud_pfn(*pud);
1208 	struct mm_struct *mm = vma->vm_mm;
1209 	struct page *page;
1210 	int ret;
1211 
1212 	assert_spin_locked(pud_lockptr(mm, pud));
1213 
1214 	if (flags & FOLL_WRITE && !pud_write(*pud))
1215 		return NULL;
1216 
1217 	if (pud_present(*pud) && pud_devmap(*pud))
1218 		/* pass */;
1219 	else
1220 		return NULL;
1221 
1222 	if (flags & FOLL_TOUCH)
1223 		touch_pud(vma, addr, pud, flags & FOLL_WRITE);
1224 
1225 	/*
1226 	 * device mapped pages can only be returned if the
1227 	 * caller will manage the page reference count.
1228 	 *
1229 	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
1230 	 */
1231 	if (!(flags & (FOLL_GET | FOLL_PIN)))
1232 		return ERR_PTR(-EEXIST);
1233 
1234 	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1235 	*pgmap = get_dev_pagemap(pfn, *pgmap);
1236 	if (!*pgmap)
1237 		return ERR_PTR(-EFAULT);
1238 	page = pfn_to_page(pfn);
1239 
1240 	ret = try_grab_page(page, flags);
1241 	if (ret)
1242 		page = ERR_PTR(ret);
1243 
1244 	return page;
1245 }
1246 
1247 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1248 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1249 		  struct vm_area_struct *vma)
1250 {
1251 	spinlock_t *dst_ptl, *src_ptl;
1252 	pud_t pud;
1253 	int ret;
1254 
1255 	dst_ptl = pud_lock(dst_mm, dst_pud);
1256 	src_ptl = pud_lockptr(src_mm, src_pud);
1257 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1258 
1259 	ret = -EAGAIN;
1260 	pud = *src_pud;
1261 	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1262 		goto out_unlock;
1263 
1264 	/*
1265 	 * While the page table lock is held, the huge zero pud cannot be
1266 	 * under splitting, since we never split the page itself, only the
1267 	 * pud into a page table.
1268 	 */
1269 	if (is_huge_zero_pud(pud)) {
1270 		/* No huge zero pud yet */
1271 	}
1272 
1273 	/*
1274 	 * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
1275 	 * and split if duplicating fails.
1276 	 */
1277 	pudp_set_wrprotect(src_mm, addr, src_pud);
1278 	pud = pud_mkold(pud_wrprotect(pud));
1279 	set_pud_at(dst_mm, addr, dst_pud, pud);
1280 
1281 	ret = 0;
1282 out_unlock:
1283 	spin_unlock(src_ptl);
1284 	spin_unlock(dst_ptl);
1285 	return ret;
1286 }
1287 
1288 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1289 {
1290 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1291 
1292 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1293 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1294 		goto unlock;
1295 
1296 	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1297 unlock:
1298 	spin_unlock(vmf->ptl);
1299 }
1300 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1301 
1302 void huge_pmd_set_accessed(struct vm_fault *vmf)
1303 {
1304 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1305 
1306 	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1307 	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1308 		goto unlock;
1309 
1310 	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1311 
1312 unlock:
1313 	spin_unlock(vmf->ptl);
1314 }
1315 
1316 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1317 {
1318 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1319 	struct vm_area_struct *vma = vmf->vma;
1320 	struct folio *folio;
1321 	struct page *page;
1322 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1323 	pmd_t orig_pmd = vmf->orig_pmd;
1324 
1325 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1326 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
1327 
1328 	if (is_huge_zero_pmd(orig_pmd))
1329 		goto fallback;
1330 
1331 	spin_lock(vmf->ptl);
1332 
1333 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1334 		spin_unlock(vmf->ptl);
1335 		return 0;
1336 	}
1337 
1338 	page = pmd_page(orig_pmd);
1339 	folio = page_folio(page);
1340 	VM_BUG_ON_PAGE(!PageHead(page), page);
1341 
1342 	/* Early check when only holding the PT lock. */
1343 	if (PageAnonExclusive(page))
1344 		goto reuse;
1345 
1346 	if (!folio_trylock(folio)) {
1347 		folio_get(folio);
1348 		spin_unlock(vmf->ptl);
1349 		folio_lock(folio);
1350 		spin_lock(vmf->ptl);
1351 		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1352 			spin_unlock(vmf->ptl);
1353 			folio_unlock(folio);
1354 			folio_put(folio);
1355 			return 0;
1356 		}
1357 		folio_put(folio);
1358 	}
1359 
1360 	/* Recheck after temporarily dropping the PT lock. */
1361 	if (PageAnonExclusive(page)) {
1362 		folio_unlock(folio);
1363 		goto reuse;
1364 	}
1365 
1366 	/*
1367 	 * See do_wp_page(): we can only reuse the folio exclusively if
1368 	 * there are no additional references. Note that we always drain
1369 	 * the LRU cache immediately after adding a THP.
1370 	 */
1371 	if (folio_ref_count(folio) >
1372 			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1373 		goto unlock_fallback;
1374 	if (folio_test_swapcache(folio))
1375 		folio_free_swap(folio);
1376 	if (folio_ref_count(folio) == 1) {
1377 		pmd_t entry;
1378 
1379 		folio_move_anon_rmap(folio, vma);
1380 		SetPageAnonExclusive(page);
1381 		folio_unlock(folio);
1382 reuse:
1383 		if (unlikely(unshare)) {
1384 			spin_unlock(vmf->ptl);
1385 			return 0;
1386 		}
1387 		entry = pmd_mkyoung(orig_pmd);
1388 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1389 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1390 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1391 		spin_unlock(vmf->ptl);
1392 		return 0;
1393 	}
1394 
1395 unlock_fallback:
1396 	folio_unlock(folio);
1397 	spin_unlock(vmf->ptl);
1398 fallback:
1399 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1400 	return VM_FAULT_FALLBACK;
1401 }
1402 
1403 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1404 					   unsigned long addr, pmd_t pmd)
1405 {
1406 	struct page *page;
1407 
1408 	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1409 		return false;
1410 
1411 	/* Don't touch entries that are not even readable (NUMA hinting). */
1412 	if (pmd_protnone(pmd))
1413 		return false;
1414 
1415 	/* Do we need write faults for softdirty tracking? */
1416 	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1417 		return false;
1418 
1419 	/* Do we need write faults for uffd-wp tracking? */
1420 	if (userfaultfd_huge_pmd_wp(vma, pmd))
1421 		return false;
1422 
1423 	if (!(vma->vm_flags & VM_SHARED)) {
1424 		/* See can_change_pte_writable(). */
1425 		page = vm_normal_page_pmd(vma, addr, pmd);
1426 		return page && PageAnon(page) && PageAnonExclusive(page);
1427 	}
1428 
1429 	/* See can_change_pte_writable(). */
1430 	return pmd_dirty(pmd);
1431 }
1432 
1433 /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
1434 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1435 					struct vm_area_struct *vma,
1436 					unsigned int flags)
1437 {
1438 	/* If the pmd is writable, we can write to the page. */
1439 	if (pmd_write(pmd))
1440 		return true;
1441 
1442 	/* Maybe FOLL_FORCE is set to override it? */
1443 	if (!(flags & FOLL_FORCE))
1444 		return false;
1445 
1446 	/* But FOLL_FORCE has no effect on shared mappings */
1447 	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
1448 		return false;
1449 
1450 	/* ... or read-only private ones */
1451 	if (!(vma->vm_flags & VM_MAYWRITE))
1452 		return false;
1453 
1454 	/* ... or already writable ones that just need to take a write fault */
1455 	if (vma->vm_flags & VM_WRITE)
1456 		return false;
1457 
1458 	/*
1459 	 * See can_change_pte_writable(): we broke COW and could map the page
1460 	 * writable if we have an exclusive anonymous page ...
1461 	 */
1462 	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
1463 		return false;
1464 
1465 	/* ... and a write-fault isn't required for other reasons. */
1466 	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1467 		return false;
1468 	return !userfaultfd_huge_pmd_wp(vma, pmd);
1469 }
1470 
1471 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1472 				   unsigned long addr,
1473 				   pmd_t *pmd,
1474 				   unsigned int flags)
1475 {
1476 	struct mm_struct *mm = vma->vm_mm;
1477 	struct page *page;
1478 	int ret;
1479 
1480 	assert_spin_locked(pmd_lockptr(mm, pmd));
1481 
1482 	page = pmd_page(*pmd);
1483 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1484 
1485 	if ((flags & FOLL_WRITE) &&
1486 	    !can_follow_write_pmd(*pmd, page, vma, flags))
1487 		return NULL;
1488 
1489 	/* Avoid dumping huge zero page */
1490 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1491 		return ERR_PTR(-EFAULT);
1492 
1493 	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
1494 		return NULL;
1495 
1496 	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
1497 		return ERR_PTR(-EMLINK);
1498 
1499 	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1500 			!PageAnonExclusive(page), page);
1501 
1502 	ret = try_grab_page(page, flags);
1503 	if (ret)
1504 		return ERR_PTR(ret);
1505 
1506 	if (flags & FOLL_TOUCH)
1507 		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1508 
1509 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1510 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1511 
1512 	return page;
1513 }
1514 
1515 /* NUMA hinting page fault entry point for trans huge pmds */
1516 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1517 {
1518 	struct vm_area_struct *vma = vmf->vma;
1519 	pmd_t oldpmd = vmf->orig_pmd;
1520 	pmd_t pmd;
1521 	struct folio *folio;
1522 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1523 	int nid = NUMA_NO_NODE;
1524 	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
1525 	bool migrated = false, writable = false;
1526 	int flags = 0;
1527 
1528 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1529 	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1530 		spin_unlock(vmf->ptl);
1531 		goto out;
1532 	}
1533 
1534 	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1535 
1536 	/*
1537 	 * Detect now whether the PMD could be writable; this information
1538 	 * is only valid while holding the PT lock.
1539 	 */
1540 	writable = pmd_write(pmd);
1541 	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1542 	    can_change_pmd_writable(vma, vmf->address, pmd))
1543 		writable = true;
1544 
1545 	folio = vm_normal_folio_pmd(vma, haddr, pmd);
1546 	if (!folio)
1547 		goto out_map;
1548 
1549 	/* See similar comment in do_numa_page for explanation */
1550 	if (!writable)
1551 		flags |= TNF_NO_GROUP;
1552 
1553 	nid = folio_nid(folio);
1554 	/*
1555 	 * In memory tiering mode, the cpupid field of a slow-memory page
1556 	 * records the page's access time instead, so use the default value.
1557 	 */
1558 	if (node_is_toptier(nid))
1559 		last_cpupid = page_cpupid_last(&folio->page);
1560 	target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
1561 	if (target_nid == NUMA_NO_NODE) {
1562 		folio_put(folio);
1563 		goto out_map;
1564 	}
1565 
1566 	spin_unlock(vmf->ptl);
1567 	writable = false;
1568 
1569 	migrated = migrate_misplaced_folio(folio, vma, target_nid);
1570 	if (migrated) {
1571 		flags |= TNF_MIGRATED;
1572 		nid = target_nid;
1573 	} else {
1574 		flags |= TNF_MIGRATE_FAIL;
1575 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1576 		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1577 			spin_unlock(vmf->ptl);
1578 			goto out;
1579 		}
1580 		goto out_map;
1581 	}
1582 
1583 out:
1584 	if (nid != NUMA_NO_NODE)
1585 		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
1586 
1587 	return 0;
1588 
1589 out_map:
1590 	/* Restore the PMD */
1591 	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1592 	pmd = pmd_mkyoung(pmd);
1593 	if (writable)
1594 		pmd = pmd_mkwrite(pmd, vma);
1595 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1596 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1597 	spin_unlock(vmf->ptl);
1598 	goto out;
1599 }
1600 
1601 /*
1602  * Return true if we do MADV_FREE successfully on entire pmd page.
1603  * Otherwise, return false.
1604  */
1605 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1606 		pmd_t *pmd, unsigned long addr, unsigned long next)
1607 {
1608 	spinlock_t *ptl;
1609 	pmd_t orig_pmd;
1610 	struct folio *folio;
1611 	struct mm_struct *mm = tlb->mm;
1612 	bool ret = false;
1613 
1614 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1615 
1616 	ptl = pmd_trans_huge_lock(pmd, vma);
1617 	if (!ptl)
1618 		goto out_unlocked;
1619 
1620 	orig_pmd = *pmd;
1621 	if (is_huge_zero_pmd(orig_pmd))
1622 		goto out;
1623 
1624 	if (unlikely(!pmd_present(orig_pmd))) {
1625 		VM_BUG_ON(thp_migration_supported() &&
1626 				  !is_pmd_migration_entry(orig_pmd));
1627 		goto out;
1628 	}
1629 
1630 	folio = pfn_folio(pmd_pfn(orig_pmd));
1631 	/*
1632 	 * If other processes are mapping this folio, we can't discard it
1633 	 * unless they all do MADV_FREE, so skip the folio.
1634 	 */
1635 	if (folio_estimated_sharers(folio) != 1)
1636 		goto out;
1637 
1638 	if (!folio_trylock(folio))
1639 		goto out;
1640 
1641 	/*
1642 	 * If the user wants to discard only part of the THP's pages, split
1643 	 * it so MADV_FREE will deactivate just those pages.
1644 	 */
1645 	if (next - addr != HPAGE_PMD_SIZE) {
1646 		folio_get(folio);
1647 		spin_unlock(ptl);
1648 		split_folio(folio);
1649 		folio_unlock(folio);
1650 		folio_put(folio);
1651 		goto out_unlocked;
1652 	}
1653 
1654 	if (folio_test_dirty(folio))
1655 		folio_clear_dirty(folio);
1656 	folio_unlock(folio);
1657 
1658 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1659 		pmdp_invalidate(vma, addr, pmd);
1660 		orig_pmd = pmd_mkold(orig_pmd);
1661 		orig_pmd = pmd_mkclean(orig_pmd);
1662 
1663 		set_pmd_at(mm, addr, pmd, orig_pmd);
1664 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1665 	}
1666 
1667 	folio_mark_lazyfree(folio);
1668 	ret = true;
1669 out:
1670 	spin_unlock(ptl);
1671 out_unlocked:
1672 	return ret;
1673 }
1674 
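/*
 * Illustrative userspace view (assumed, for context): the discard path
 * above runs when a single call covers a whole PMD range, e.g.
 *
 *	madvise(addr, 2UL << 20, MADV_FREE);	// addr 2MiB-aligned
 *
 * A smaller or misaligned range takes the split_folio() branch instead,
 * so only the covered subpages become lazily freeable.
 */
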
1675 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1676 {
1677 	pgtable_t pgtable;
1678 
1679 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1680 	pte_free(mm, pgtable);
1681 	mm_dec_nr_ptes(mm);
1682 }
1683 
1684 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1685 		 pmd_t *pmd, unsigned long addr)
1686 {
1687 	pmd_t orig_pmd;
1688 	spinlock_t *ptl;
1689 
1690 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1691 
1692 	ptl = __pmd_trans_huge_lock(pmd, vma);
1693 	if (!ptl)
1694 		return 0;
1695 	/*
1696 	 * Architectures like ppc64 consult the deposited pgtable when
1697 	 * calling pmdp_huge_get_and_clear, so only do the
1698 	 * pgtable_trans_huge_withdraw after finishing the pmdp-related
1699 	 * operations.
1700 	 */
1701 	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1702 						tlb->fullmm);
1703 	arch_check_zapped_pmd(vma, orig_pmd);
1704 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1705 	if (vma_is_special_huge(vma)) {
1706 		if (arch_needs_pgtable_deposit())
1707 			zap_deposited_table(tlb->mm, pmd);
1708 		spin_unlock(ptl);
1709 	} else if (is_huge_zero_pmd(orig_pmd)) {
1710 		zap_deposited_table(tlb->mm, pmd);
1711 		spin_unlock(ptl);
1712 	} else {
1713 		struct page *page = NULL;
1714 		int flush_needed = 1;
1715 
1716 		if (pmd_present(orig_pmd)) {
1717 			page = pmd_page(orig_pmd);
1718 			page_remove_rmap(page, vma, true);
1719 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1720 			VM_BUG_ON_PAGE(!PageHead(page), page);
1721 		} else if (thp_migration_supported()) {
1722 			swp_entry_t entry;
1723 
1724 			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1725 			entry = pmd_to_swp_entry(orig_pmd);
1726 			page = pfn_swap_entry_to_page(entry);
1727 			flush_needed = 0;
1728 		} else
1729 			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1730 
1731 		if (PageAnon(page)) {
1732 			zap_deposited_table(tlb->mm, pmd);
1733 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1734 		} else {
1735 			if (arch_needs_pgtable_deposit())
1736 				zap_deposited_table(tlb->mm, pmd);
1737 			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
1738 		}
1739 
1740 		spin_unlock(ptl);
1741 		if (flush_needed)
1742 			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1743 	}
1744 	return 1;
1745 }
1746 
1747 #ifndef pmd_move_must_withdraw
1748 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1749 					 spinlock_t *old_pmd_ptl,
1750 					 struct vm_area_struct *vma)
1751 {
1752 	/*
1753 	 * With split pmd locks we also need to move the preallocated
1754 	 * PTE page table if new_pmd is on a different PMD page table.
1755 	 *
1756 	 * We also don't deposit and withdraw tables for file pages.
1757 	 */
1758 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1759 }
1760 #endif
1761 
1762 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1763 {
1764 #ifdef CONFIG_MEM_SOFT_DIRTY
1765 	if (unlikely(is_pmd_migration_entry(pmd)))
1766 		pmd = pmd_swp_mksoft_dirty(pmd);
1767 	else if (pmd_present(pmd))
1768 		pmd = pmd_mksoft_dirty(pmd);
1769 #endif
1770 	return pmd;
1771 }
1772 
1773 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1774 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
1775 {
1776 	spinlock_t *old_ptl, *new_ptl;
1777 	pmd_t pmd;
1778 	struct mm_struct *mm = vma->vm_mm;
1779 	bool force_flush = false;
1780 
1781 	/*
1782 	 * The destination pmd shouldn't be established, free_pgtables()
1783 	 * should have released it; but move_page_tables() might have already
1784 	 * inserted a page table, if racing against shmem/file collapse.
1785 	 */
1786 	if (!pmd_none(*new_pmd)) {
1787 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
1788 		return false;
1789 	}
1790 
1791 	/*
1792 	 * We don't have to worry about the ordering of src and dst
1793 	 * ptlocks because exclusive mmap_lock prevents deadlock.
1794 	 */
1795 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1796 	if (old_ptl) {
1797 		new_ptl = pmd_lockptr(mm, new_pmd);
1798 		if (new_ptl != old_ptl)
1799 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1800 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1801 		if (pmd_present(pmd))
1802 			force_flush = true;
1803 		VM_BUG_ON(!pmd_none(*new_pmd));
1804 
1805 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1806 			pgtable_t pgtable;
1807 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1808 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1809 		}
1810 		pmd = move_soft_dirty_pmd(pmd);
1811 		set_pmd_at(mm, new_addr, new_pmd, pmd);
1812 		if (force_flush)
1813 			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1814 		if (new_ptl != old_ptl)
1815 			spin_unlock(new_ptl);
1816 		spin_unlock(old_ptl);
1817 		return true;
1818 	}
1819 	return false;
1820 }
1821 
1822 /*
1823  * Returns
1824  *  - 0 if PMD could not be locked
1825  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1826  *      or if prot_numa but THP migration is not supported
1827  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1828  */
1829 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1830 		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
1831 		    unsigned long cp_flags)
1832 {
1833 	struct mm_struct *mm = vma->vm_mm;
1834 	spinlock_t *ptl;
1835 	pmd_t oldpmd, entry;
1836 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
1837 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
1838 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
1839 	int ret = 1;
1840 
1841 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1842 
1843 	if (prot_numa && !thp_migration_supported())
1844 		return 1;
1845 
1846 	ptl = __pmd_trans_huge_lock(pmd, vma);
1847 	if (!ptl)
1848 		return 0;
1849 
1850 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1851 	if (is_swap_pmd(*pmd)) {
1852 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
1853 		struct page *page = pfn_swap_entry_to_page(entry);
1854 		pmd_t newpmd;
1855 
1856 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1857 		if (is_writable_migration_entry(entry)) {
1858 			/*
1859 			 * A protection check is difficult, so just play it
1860 			 * safe and disable write access.
1861 			 */
1862 			if (PageAnon(page))
1863 				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
1864 			else
1865 				entry = make_readable_migration_entry(swp_offset(entry));
1866 			newpmd = swp_entry_to_pmd(entry);
1867 			if (pmd_swp_soft_dirty(*pmd))
1868 				newpmd = pmd_swp_mksoft_dirty(newpmd);
1869 		} else {
1870 			newpmd = *pmd;
1871 		}
1872 
1873 		if (uffd_wp)
1874 			newpmd = pmd_swp_mkuffd_wp(newpmd);
1875 		else if (uffd_wp_resolve)
1876 			newpmd = pmd_swp_clear_uffd_wp(newpmd);
1877 		if (!pmd_same(*pmd, newpmd))
1878 			set_pmd_at(mm, addr, pmd, newpmd);
1879 		goto unlock;
1880 	}
1881 #endif
1882 
1883 	if (prot_numa) {
1884 		struct page *page;
1885 		bool toptier;
1886 		/*
1887 		 * Avoid trapping faults against the zero page. The read-only
1888 		 * data is likely to be read-cached on the local CPU and
1889 		 * local/remote hits to the zero page are not interesting.
1890 		 */
1891 		if (is_huge_zero_pmd(*pmd))
1892 			goto unlock;
1893 
1894 		if (pmd_protnone(*pmd))
1895 			goto unlock;
1896 
1897 		page = pmd_page(*pmd);
1898 		toptier = node_is_toptier(page_to_nid(page));
1899 		/*
1900 		 * Skip scanning the top-tier node if normal NUMA
1901 		 * balancing is disabled.
1902 		 */
1903 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
1904 		    toptier)
1905 			goto unlock;
1906 
1907 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
1908 		    !toptier)
1909 			xchg_page_access_time(page, jiffies_to_msecs(jiffies));
1910 	}
1911 	/*
1912 	 * In the prot_numa case, we are under mmap_read_lock(mm). It's
1913 	 * critical not to clear the pmd intermittently, to avoid racing with
1914 	 * MADV_DONTNEED, which also runs under mmap_read_lock(mm):
1915 	 *
1916 	 *	CPU0:				CPU1:
1917 	 *				change_huge_pmd(prot_numa=1)
1918 	 *				 pmdp_huge_get_and_clear_notify()
1919 	 * madvise_dontneed()
1920 	 *  zap_pmd_range()
1921 	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
1922 	 *   // skip the pmd
1923 	 *				 set_pmd_at();
1924 	 *				 // pmd is re-established
1925 	 *
1926 	 * The race makes MADV_DONTNEED miss the huge pmd and fail to clear it,
1927 	 * which may break userspace.
1928 	 *
1929 	 * pmdp_invalidate_ad() is required to make sure we don't miss
1930 	 * dirty/young flags set by hardware.
1931 	 */
1932 	oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
1933 
1934 	entry = pmd_modify(oldpmd, newprot);
1935 	if (uffd_wp)
1936 		entry = pmd_mkuffd_wp(entry);
1937 	else if (uffd_wp_resolve)
1938 		/*
1939 		 * Leave the write bit to be handled by the page fault
1940 		 * handler, so that things like COW can be properly
1941 		 * handled.
1942 		 */
1943 		entry = pmd_clear_uffd_wp(entry);
1944 
1945 	/* See change_pte_range(). */
1946 	if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
1947 	    can_change_pmd_writable(vma, addr, entry))
1948 		entry = pmd_mkwrite(entry, vma);
1949 
1950 	ret = HPAGE_PMD_NR;
1951 	set_pmd_at(mm, addr, pmd, entry);
1952 
1953 	if (huge_pmd_needs_flush(oldpmd, entry))
1954 		tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
1955 unlock:
1956 	spin_unlock(ptl);
1957 	return ret;
1958 }
1959 
1960 /*
1961  * Returns the page table lock pointer if a given pmd maps a THP, NULL otherwise.
1962  *
1963  * Note that on success this routine returns with the page table lock still
1964  * held; the caller must unlock it.
1965  */
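/*
 * Typical calling pattern (an illustrative sketch; change_huge_pmd() above
 * is a real user):
 *
 *	ptl = __pmd_trans_huge_lock(pmd, vma);
 *	if (!ptl)
 *		return 0;
 *	... operate on the huge pmd with the lock held ...
 *	spin_unlock(ptl);
 */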
1966 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1967 {
1968 	spinlock_t *ptl;
1969 	ptl = pmd_lock(vma->vm_mm, pmd);
1970 	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1971 			pmd_devmap(*pmd)))
1972 		return ptl;
1973 	spin_unlock(ptl);
1974 	return NULL;
1975 }
1976 
1977 /*
1978  * Returns the page table lock pointer if a given pud maps a THP, NULL otherwise.
1979  *
1980  * Note that on success this routine returns with the page table lock still
1981  * held; the caller must unlock it.
1982  */
1983 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1984 {
1985 	spinlock_t *ptl;
1986 
1987 	ptl = pud_lock(vma->vm_mm, pud);
1988 	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1989 		return ptl;
1990 	spin_unlock(ptl);
1991 	return NULL;
1992 }
1993 
1994 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1995 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1996 		 pud_t *pud, unsigned long addr)
1997 {
1998 	spinlock_t *ptl;
1999 
2000 	ptl = __pud_trans_huge_lock(pud, vma);
2001 	if (!ptl)
2002 		return 0;
2003 
2004 	pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2005 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
2006 	if (vma_is_special_huge(vma)) {
2007 		spin_unlock(ptl);
2008 		/* No zero page support yet */
2009 	} else {
2010 		/* No support for anonymous PUD pages yet */
2011 		BUG();
2012 	}
2013 	return 1;
2014 }
2015 
2016 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2017 		unsigned long haddr)
2018 {
2019 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2020 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2021 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2022 	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2023 
2024 	count_vm_event(THP_SPLIT_PUD);
2025 
2026 	pudp_huge_clear_flush(vma, haddr, pud);
2027 }
2028 
2029 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2030 		unsigned long address)
2031 {
2032 	spinlock_t *ptl;
2033 	struct mmu_notifier_range range;
2034 
2035 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2036 				address & HPAGE_PUD_MASK,
2037 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2038 	mmu_notifier_invalidate_range_start(&range);
2039 	ptl = pud_lock(vma->vm_mm, pud);
2040 	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2041 		goto out;
2042 	__split_huge_pud_locked(vma, pud, range.start);
2043 
2044 out:
2045 	spin_unlock(ptl);
2046 	mmu_notifier_invalidate_range_end(&range);
2047 }
2048 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2049 
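/*
 * Replace a pmd mapping the huge zero page with a pte page table mapping
 * the zero page: withdraw the deposited pte table, fill it with special
 * zero-page ptes (carrying over the uffd-wp bit), and re-install it under
 * the pmd.
 */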
2050 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2051 		unsigned long haddr, pmd_t *pmd)
2052 {
2053 	struct mm_struct *mm = vma->vm_mm;
2054 	pgtable_t pgtable;
2055 	pmd_t _pmd, old_pmd;
2056 	unsigned long addr;
2057 	pte_t *pte;
2058 	int i;
2059 
2060 	/*
2061 	 * Leave the pmd empty until the ptes are filled. Note that it is fine
2062 	 * to delay notification until mmu_notifier_invalidate_range_end(), as
2063 	 * we are replacing a write-protected zero huge page with
2064 	 * write-protected zero ptes.
2065 	 *
2066 	 * See Documentation/mm/mmu_notifier.rst
2067 	 */
2068 	old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2069 
2070 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2071 	pmd_populate(mm, &_pmd, pgtable);
2072 
2073 	pte = pte_offset_map(&_pmd, haddr);
2074 	VM_BUG_ON(!pte);
2075 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2076 		pte_t entry;
2077 
2078 		entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2079 		entry = pte_mkspecial(entry);
2080 		if (pmd_uffd_wp(old_pmd))
2081 			entry = pte_mkuffd_wp(entry);
2082 		VM_BUG_ON(!pte_none(ptep_get(pte)));
2083 		set_pte_at(mm, addr, pte, entry);
2084 		pte++;
2085 	}
2086 	pte_unmap(pte - 1);
2087 	smp_wmb(); /* make pte visible before pmd */
2088 	pmd_populate(mm, pmd, pgtable);
2089 }
2090 
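/*
 * Split a huge pmd in place, with the pmd lock held by the caller:
 * file-backed mappings are simply unmapped; the huge zero page is rewritten
 * as a table of zero ptes; anonymous pages are remapped with HPAGE_PMD_NR
 * ptes (or, with "freeze", with pte-level migration entries), preserving
 * the write/young/dirty/soft-dirty/uffd-wp state of the original pmd.
 */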
2091 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2092 		unsigned long haddr, bool freeze)
2093 {
2094 	struct mm_struct *mm = vma->vm_mm;
2095 	struct page *page;
2096 	pgtable_t pgtable;
2097 	pmd_t old_pmd, _pmd;
2098 	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2099 	bool anon_exclusive = false, dirty = false;
2100 	unsigned long addr;
2101 	pte_t *pte;
2102 	int i;
2103 
2104 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2105 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2106 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2107 	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2108 				&& !pmd_devmap(*pmd));
2109 
2110 	count_vm_event(THP_SPLIT_PMD);
2111 
2112 	if (!vma_is_anonymous(vma)) {
2113 		old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2114 		/*
2115 		 * We are going to unmap this huge page, so just
2116 		 * go ahead and zap it.
2117 		 */
2118 		if (arch_needs_pgtable_deposit())
2119 			zap_deposited_table(mm, pmd);
2120 		if (vma_is_special_huge(vma))
2121 			return;
2122 		if (unlikely(is_pmd_migration_entry(old_pmd))) {
2123 			swp_entry_t entry;
2124 
2125 			entry = pmd_to_swp_entry(old_pmd);
2126 			page = pfn_swap_entry_to_page(entry);
2127 		} else {
2128 			page = pmd_page(old_pmd);
2129 			if (!PageDirty(page) && pmd_dirty(old_pmd))
2130 				set_page_dirty(page);
2131 			if (!PageReferenced(page) && pmd_young(old_pmd))
2132 				SetPageReferenced(page);
2133 			page_remove_rmap(page, vma, true);
2134 			put_page(page);
2135 		}
2136 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2137 		return;
2138 	}
2139 
2140 	if (is_huge_zero_pmd(*pmd)) {
2141 		/*
2142 		 * FIXME: Do we want to invalidate the secondary mmu by calling
2143 		 * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments
2144 		 * below, inside __split_huge_pmd().
2145 		 *
2146 		 * We are going from a write-protected huge zero page to
2147 		 * write-protected small zero pages, so it does not seem useful
2148 		 * to invalidate the secondary mmu at this time.
2149 		 */
2150 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
2151 	}
2152 
2153 	/*
2154 	 * Up to this point the pmd is present and huge, and userland has full
2155 	 * access to the hugepage during the split (which happens in place).
2156 	 * If we overwrite the pmd with the not-huge version pointing to the
2157 	 * pte here (which of course we could if all CPUs were bug free),
2158 	 * userland could trigger a small page size TLB miss on the small
2159 	 * sized TLB while the hugepage TLB entry is still established in the
2160 	 * huge TLB. Some CPUs don't like that. See
2161 	 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum 383
2162 	 * on page 105. Intel should be safe, but it also warns that it's only
2163 	 * safe if the permission and cache attributes of the two entries
2164 	 * loaded in the two TLBs are identical (which should be the case
2165 	 * here). It is generally safer never to allow small and huge TLB
2166 	 * entries for the same virtual address to be loaded simultaneously,
2167 	 * so instead of doing "pmd_populate(); flush_pmd_tlb_range();" we
2168 	 * first mark the current pmd notpresent (atomically, because
2169 	 * pmd_trans_huge must remain set on the pmd at all times until the
2170 	 * split is complete), then flush the SMP TLB and finally write the
2171 	 * non-huge version of the pmd entry with pmd_populate.
2172 	 */
2173 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
2174 
2175 	pmd_migration = is_pmd_migration_entry(old_pmd);
2176 	if (unlikely(pmd_migration)) {
2177 		swp_entry_t entry;
2178 
2179 		entry = pmd_to_swp_entry(old_pmd);
2180 		page = pfn_swap_entry_to_page(entry);
2181 		write = is_writable_migration_entry(entry);
2182 		if (PageAnon(page))
2183 			anon_exclusive = is_readable_exclusive_migration_entry(entry);
2184 		young = is_migration_entry_young(entry);
2185 		dirty = is_migration_entry_dirty(entry);
2186 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
2187 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
2188 	} else {
2189 		page = pmd_page(old_pmd);
2190 		if (pmd_dirty(old_pmd)) {
2191 			dirty = true;
2192 			SetPageDirty(page);
2193 		}
2194 		write = pmd_write(old_pmd);
2195 		young = pmd_young(old_pmd);
2196 		soft_dirty = pmd_soft_dirty(old_pmd);
2197 		uffd_wp = pmd_uffd_wp(old_pmd);
2198 
2199 		VM_BUG_ON_PAGE(!page_count(page), page);
2200 
2201 		/*
2202 		 * Without "freeze", we'll simply split the PMD, propagating the
2203 		 * PageAnonExclusive() flag for each PTE by setting it for
2204 		 * each subpage -- no need to (temporarily) clear.
2205 		 *
2206 		 * With "freeze" we want to replace mapped pages by
2207 		 * migration entries right away. This is only possible if we
2208 		 * managed to clear PageAnonExclusive() -- see
2209 		 * set_pmd_migration_entry().
2210 		 *
2211 		 * In case we cannot clear PageAnonExclusive(), split the PMD
2212 		 * only and let try_to_migrate_one() fail later.
2213 		 *
2214 		 * See page_try_share_anon_rmap(): invalidate PMD first.
2215 		 */
2216 		anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
2217 		if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
2218 			freeze = false;
2219 		if (!freeze)
2220 			page_ref_add(page, HPAGE_PMD_NR - 1);
2221 	}
2222 
2223 	/*
2224 	 * Withdraw the table only after we mark the pmd entry invalid.
2225 	 * This is critical for some architectures (Power).
2226 	 */
2227 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2228 	pmd_populate(mm, &_pmd, pgtable);
2229 
2230 	pte = pte_offset_map(&_pmd, haddr);
2231 	VM_BUG_ON(!pte);
2232 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2233 		pte_t entry;
2234 		/*
2235 		 * Note that NUMA hinting access restrictions are not
2236 		 * transferred to avoid any possibility of altering
2237 		 * permissions across VMAs.
2238 		 */
2239 		if (freeze || pmd_migration) {
2240 			swp_entry_t swp_entry;
2241 			if (write)
2242 				swp_entry = make_writable_migration_entry(
2243 							page_to_pfn(page + i));
2244 			else if (anon_exclusive)
2245 				swp_entry = make_readable_exclusive_migration_entry(
2246 							page_to_pfn(page + i));
2247 			else
2248 				swp_entry = make_readable_migration_entry(
2249 							page_to_pfn(page + i));
2250 			if (young)
2251 				swp_entry = make_migration_entry_young(swp_entry);
2252 			if (dirty)
2253 				swp_entry = make_migration_entry_dirty(swp_entry);
2254 			entry = swp_entry_to_pte(swp_entry);
2255 			if (soft_dirty)
2256 				entry = pte_swp_mksoft_dirty(entry);
2257 			if (uffd_wp)
2258 				entry = pte_swp_mkuffd_wp(entry);
2259 		} else {
2260 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2261 			if (write)
2262 				entry = pte_mkwrite(entry, vma);
2263 			if (anon_exclusive)
2264 				SetPageAnonExclusive(page + i);
2265 			if (!young)
2266 				entry = pte_mkold(entry);
2267 			/* NOTE: this may set soft-dirty too on some archs */
2268 			if (dirty)
2269 				entry = pte_mkdirty(entry);
2270 			if (soft_dirty)
2271 				entry = pte_mksoft_dirty(entry);
2272 			if (uffd_wp)
2273 				entry = pte_mkuffd_wp(entry);
2274 			page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
2275 		}
2276 		VM_BUG_ON(!pte_none(ptep_get(pte)));
2277 		set_pte_at(mm, addr, pte, entry);
2278 		pte++;
2279 	}
2280 	pte_unmap(pte - 1);
2281 
2282 	if (!pmd_migration)
2283 		page_remove_rmap(page, vma, true);
2284 	if (freeze)
2285 		put_page(page);
2286 
2287 	smp_wmb(); /* make pte visible before pmd */
2288 	pmd_populate(mm, pmd, pgtable);
2289 }
2290 
2291 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2292 		unsigned long address, bool freeze, struct folio *folio)
2293 {
2294 	spinlock_t *ptl;
2295 	struct mmu_notifier_range range;
2296 
2297 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2298 				address & HPAGE_PMD_MASK,
2299 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2300 	mmu_notifier_invalidate_range_start(&range);
2301 	ptl = pmd_lock(vma->vm_mm, pmd);
2302 
2303 	/*
2304 	 * If the caller asks to set up a migration entry, we need a folio to
2305 	 * check the pmd against; otherwise we can end up replacing the wrong folio.
2306 	 */
2307 	VM_BUG_ON(freeze && !folio);
2308 	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2309 
2310 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2311 	    is_pmd_migration_entry(*pmd)) {
2312 		/*
2313 		 * It's safe to call pmd_page() when the folio is set, because
2314 		 * it's guaranteed that the pmd is present.
2315 		 */
2316 		if (folio && folio != page_folio(pmd_page(*pmd)))
2317 			goto out;
2318 		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
2319 	}
2320 
2321 out:
2322 	spin_unlock(ptl);
2323 	mmu_notifier_invalidate_range_end(&range);
2324 }
2325 
2326 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2327 		bool freeze, struct folio *folio)
2328 {
2329 	pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2330 
2331 	if (!pmd)
2332 		return;
2333 
2334 	__split_huge_pmd(vma, pmd, address, freeze, folio);
2335 }
2336 
2337 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2338 {
2339 	/*
2340 	 * If the new address isn't hpage aligned and it could previously
2341 	 * contain a hugepage: check if we need to split a huge pmd.
2342 	 */
2343 	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2344 	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2345 			 ALIGN(address, HPAGE_PMD_SIZE)))
2346 		split_huge_pmd_address(vma, address, false, NULL);
2347 }
2348 
2349 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2350 			     unsigned long start,
2351 			     unsigned long end,
2352 			     long adjust_next)
2353 {
2354 	/* Check if we need to split start first. */
2355 	split_huge_pmd_if_needed(vma, start);
2356 
2357 	/* Check if we need to split end next. */
2358 	split_huge_pmd_if_needed(vma, end);
2359 
2360 	/*
2361 	 * If we're also updating the next vma vm_start,
2362 	 * check if we need to split it.
2363 	 */
2364 	if (adjust_next > 0) {
2365 		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2366 		unsigned long nstart = next->vm_start;
2367 		nstart += adjust_next;
2368 		split_huge_pmd_if_needed(next, nstart);
2369 	}
2370 }
2371 
2372 static void unmap_folio(struct folio *folio)
2373 {
2374 	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2375 		TTU_SYNC;
2376 
2377 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2378 
2379 	/*
2380 	 * Anon pages need migration entries to preserve them, but file
2381 	 * pages can simply be left unmapped, then faulted back on demand.
2382 	 * If that is ever changed (perhaps for mlock), update remap_page().
2383 	 */
2384 	if (folio_test_anon(folio))
2385 		try_to_migrate(folio, ttu_flags);
2386 	else
2387 		try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2388 }
2389 
2390 static void remap_page(struct folio *folio, unsigned long nr)
2391 {
2392 	int i = 0;
2393 
2394 	/* If unmap_folio() uses try_to_migrate() on file, remove this check */
2395 	if (!folio_test_anon(folio))
2396 		return;
2397 	for (;;) {
2398 		remove_migration_ptes(folio, folio, true);
2399 		i += folio_nr_pages(folio);
2400 		if (i >= nr)
2401 			break;
2402 		folio = folio_next(folio);
2403 	}
2404 }
2405 
2406 static void lru_add_page_tail(struct page *head, struct page *tail,
2407 		struct lruvec *lruvec, struct list_head *list)
2408 {
2409 	VM_BUG_ON_PAGE(!PageHead(head), head);
2410 	VM_BUG_ON_PAGE(PageCompound(tail), head);
2411 	VM_BUG_ON_PAGE(PageLRU(tail), head);
2412 	lockdep_assert_held(&lruvec->lru_lock);
2413 
2414 	if (list) {
2415 		/* page reclaim is reclaiming a huge page */
2416 		VM_WARN_ON(PageLRU(head));
2417 		get_page(tail);
2418 		list_add_tail(&tail->lru, list);
2419 	} else {
2420 		/* head is still on lru (and we have it frozen) */
2421 		VM_WARN_ON(!PageLRU(head));
2422 		if (PageUnevictable(tail))
2423 			tail->mlock_count = 0;
2424 		else
2425 			list_add_tail(&tail->lru, &head->lru);
2426 		SetPageLRU(tail);
2427 	}
2428 }
2429 
2430 static void __split_huge_page_tail(struct folio *folio, int tail,
2431 		struct lruvec *lruvec, struct list_head *list)
2432 {
2433 	struct page *head = &folio->page;
2434 	struct page *page_tail = head + tail;
2435 	/*
2436 	 * Careful: new_folio is not a "real" folio until we have cleared PageTail.
2437 	 * Don't pass it around before clear_compound_head().
2438 	 */
2439 	struct folio *new_folio = (struct folio *)page_tail;
2440 
2441 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2442 
2443 	/*
2444 	 * Clone page flags before unfreezing refcount.
2445 	 *
2446 	 * A successful get_page_unless_zero() might be followed by flag
2447 	 * changes, for example by lock_page(), which sets PG_waiters.
2448 	 *
2449 	 * Note that for mapped sub-pages of an anonymous THP,
2450 	 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
2451 	 * the migration entry instead from where remap_page() will restore it.
2452 	 * We can still have PG_anon_exclusive set on effectively unmapped and
2453 	 * unreferenced sub-pages of an anonymous THP: we can simply drop
2454 	 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2455 	 */
2456 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2457 	page_tail->flags |= (head->flags &
2458 			((1L << PG_referenced) |
2459 			 (1L << PG_swapbacked) |
2460 			 (1L << PG_swapcache) |
2461 			 (1L << PG_mlocked) |
2462 			 (1L << PG_uptodate) |
2463 			 (1L << PG_active) |
2464 			 (1L << PG_workingset) |
2465 			 (1L << PG_locked) |
2466 			 (1L << PG_unevictable) |
2467 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
2468 			 (1L << PG_arch_2) |
2469 			 (1L << PG_arch_3) |
2470 #endif
2471 			 (1L << PG_dirty) |
2472 			 LRU_GEN_MASK | LRU_REFS_MASK));
2473 
2474 	/* ->mapping in first and second tail page is replaced by other uses */
2475 	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2476 			page_tail);
2477 	page_tail->mapping = head->mapping;
2478 	page_tail->index = head->index + tail;
2479 
2480 	/*
2481 	 * page->private should not be set in tail pages. Fix up and warn once
2482 	 * if private is unexpectedly set.
2483 	 */
2484 	if (unlikely(page_tail->private)) {
2485 		VM_WARN_ON_ONCE_PAGE(true, page_tail);
2486 		page_tail->private = 0;
2487 	}
2488 	if (folio_test_swapcache(folio))
2489 		new_folio->swap.val = folio->swap.val + tail;
2490 
2491 	/* Page flags must be visible before we make the page non-compound. */
2492 	smp_wmb();
2493 
2494 	/*
2495 	 * Clear PageTail before unfreezing page refcount.
2496 	 *
2497 	 * A successful get_page_unless_zero() might be followed by
2498 	 * put_page(), which needs a correct compound_head().
2499 	 */
2500 	clear_compound_head(page_tail);
2501 
2502 	/* Finally unfreeze refcount. Additional reference from page cache. */
2503 	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2504 					  PageSwapCache(head)));
2505 
2506 	if (page_is_young(head))
2507 		set_page_young(page_tail);
2508 	if (page_is_idle(head))
2509 		set_page_idle(page_tail);
2510 
2511 	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2512 
2513 	/*
2514 	 * Always add to the tail, because some iterators expect new
2515 	 * pages to show up after the currently processed elements, e.g.
2516 	 * migrate_pages().
2517 	 */
2518 	lru_add_page_tail(head, page_tail, lruvec, list);
2519 }
2520 
2521 static void __split_huge_page(struct page *page, struct list_head *list,
2522 		pgoff_t end)
2523 {
2524 	struct folio *folio = page_folio(page);
2525 	struct page *head = &folio->page;
2526 	struct lruvec *lruvec;
2527 	struct address_space *swap_cache = NULL;
2528 	unsigned long offset = 0;
2529 	unsigned int nr = thp_nr_pages(head);
2530 	int i, nr_dropped = 0;
2531 
2532 	/* Complete memcg work before adding pages to the LRU. */
2533 	split_page_memcg(head, nr);
2534 
2535 	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2536 		offset = swp_offset(folio->swap);
2537 		swap_cache = swap_address_space(folio->swap);
2538 		xa_lock(&swap_cache->i_pages);
2539 	}
2540 
2541 	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2542 	lruvec = folio_lruvec_lock(folio);
2543 
2544 	ClearPageHasHWPoisoned(head);
2545 
2546 	for (i = nr - 1; i >= 1; i--) {
2547 		__split_huge_page_tail(folio, i, lruvec, list);
2548 		/* Some pages can be beyond EOF: drop them from page cache */
2549 		if (head[i].index >= end) {
2550 			struct folio *tail = page_folio(head + i);
2551 
2552 			if (shmem_mapping(head->mapping))
2553 				nr_dropped++;
2554 			else if (folio_test_clear_dirty(tail))
2555 				folio_account_cleaned(tail,
2556 					inode_to_wb(folio->mapping->host));
2557 			__filemap_remove_folio(tail, NULL);
2558 			folio_put(tail);
2559 		} else if (!PageAnon(page)) {
2560 			__xa_store(&head->mapping->i_pages, head[i].index,
2561 					head + i, 0);
2562 		} else if (swap_cache) {
2563 			__xa_store(&swap_cache->i_pages, offset + i,
2564 					head + i, 0);
2565 		}
2566 	}
2567 
2568 	ClearPageCompound(head);
2569 	unlock_page_lruvec(lruvec);
2570 	/* Caller disabled irqs, so they are still disabled here */
2571 
2572 	split_page_owner(head, nr);
2573 
2574 	/* See comment in __split_huge_page_tail() */
2575 	if (PageAnon(head)) {
2576 		/* Additional pin to swap cache */
2577 		if (PageSwapCache(head)) {
2578 			page_ref_add(head, 2);
2579 			xa_unlock(&swap_cache->i_pages);
2580 		} else {
2581 			page_ref_inc(head);
2582 		}
2583 	} else {
2584 		/* Additional pin to page cache */
2585 		page_ref_add(head, 2);
2586 		xa_unlock(&head->mapping->i_pages);
2587 	}
2588 	local_irq_enable();
2589 
2590 	if (nr_dropped)
2591 		shmem_uncharge(head->mapping->host, nr_dropped);
2592 	remap_page(folio, nr);
2593 
2594 	if (folio_test_swapcache(folio))
2595 		split_swap_cluster(folio->swap);
2596 
2597 	for (i = 0; i < nr; i++) {
2598 		struct page *subpage = head + i;
2599 		if (subpage == page)
2600 			continue;
2601 		unlock_page(subpage);
2602 
2603 		/*
2604 		 * Subpages may be freed if there wasn't any mapping,
2605 		 * e.g. if add_to_swap() is running on an lru page that
2606 		 * had its mapping zapped. Freeing these pages requires
2607 		 * taking the lru_lock, so we do the put_page of the
2608 		 * tail pages after the split is complete.
2609 		 */
2610 		free_page_and_swap_cache(subpage);
2611 	}
2612 }
2613 
2614 /* Racy check whether the huge page can be split */
2615 bool can_split_folio(struct folio *folio, int *pextra_pins)
2616 {
2617 	int extra_pins;
2618 
2619 	/* Additional pins from page cache */
2620 	if (folio_test_anon(folio))
2621 		extra_pins = folio_test_swapcache(folio) ?
2622 				folio_nr_pages(folio) : 0;
2623 	else
2624 		extra_pins = folio_nr_pages(folio);
2625 	if (pextra_pins)
2626 		*pextra_pins = extra_pins;
2627 	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
2628 }
2629 
2630 /*
2631  * This function splits a huge page into normal pages. @page can point to any
2632  * subpage of the huge page to split. The split doesn't change the position of @page.
2633  *
2634  * The caller must hold a pin on the @page, otherwise the split fails with -EBUSY.
2635  * The huge page must be locked.
2636  *
2637  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2638  *
2639  * Both head page and tail pages will inherit mapping, flags, and so on from
2640  * the hugepage.
2641  *
2642  * The GUP pin and PG_locked are transferred to @page. The remaining subpages
2643  * can be freed if they are not mapped.
2644  *
2645  * Returns 0 if the hugepage is split successfully.
2646  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2647  * us.
2648  */
2649 int split_huge_page_to_list(struct page *page, struct list_head *list)
2650 {
2651 	struct folio *folio = page_folio(page);
2652 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2653 	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
2654 	struct anon_vma *anon_vma = NULL;
2655 	struct address_space *mapping = NULL;
2656 	int extra_pins, ret;
2657 	pgoff_t end;
2658 	bool is_hzp;
2659 
2660 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2661 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2662 
2663 	is_hzp = is_huge_zero_page(&folio->page);
2664 	if (is_hzp) {
2665 		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
2666 		return -EBUSY;
2667 	}
2668 
2669 	if (folio_test_writeback(folio))
2670 		return -EBUSY;
2671 
2672 	if (folio_test_anon(folio)) {
2673 		/*
2674 		 * The caller does not necessarily hold an mmap_lock that would
2675 		 * prevent the anon_vma from disappearing, so we first take a
2676 		 * reference to it and then lock the anon_vma for write. This
2677 		 * is similar to folio_lock_anon_vma_read except the write lock
2678 		 * is taken to serialise against parallel split or collapse
2679 		 * operations.
2680 		 */
2681 		anon_vma = folio_get_anon_vma(folio);
2682 		if (!anon_vma) {
2683 			ret = -EBUSY;
2684 			goto out;
2685 		}
2686 		end = -1;
2687 		mapping = NULL;
2688 		anon_vma_lock_write(anon_vma);
2689 	} else {
2690 		gfp_t gfp;
2691 
2692 		mapping = folio->mapping;
2693 
2694 		/* Truncated? */
2695 		if (!mapping) {
2696 			ret = -EBUSY;
2697 			goto out;
2698 		}
2699 
2700 		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
2701 							GFP_RECLAIM_MASK);
2702 
2703 		if (!filemap_release_folio(folio, gfp)) {
2704 			ret = -EBUSY;
2705 			goto out;
2706 		}
2707 
2708 		xas_split_alloc(&xas, folio, folio_order(folio), gfp);
2709 		if (xas_error(&xas)) {
2710 			ret = xas_error(&xas);
2711 			goto out;
2712 		}
2713 
2714 		anon_vma = NULL;
2715 		i_mmap_lock_read(mapping);
2716 
2717 		/*
2718 		 * __split_huge_page() may need to trim off pages beyond EOF:
2719 		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2720 		 * which cannot be nested inside the page tree lock. So note
2721 		 * end now: i_size itself may be changed at any moment, but
2722 		 * folio lock is good enough to serialize the trimming.
2723 		 */
2724 		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2725 		if (shmem_mapping(mapping))
2726 			end = shmem_fallocend(mapping->host, end);
2727 	}
2728 
2729 	/*
2730 	 * Racy check whether we can split the page, before unmap_folio()
2731 	 * splits the PMDs.
2732 	 */
2733 	if (!can_split_folio(folio, &extra_pins)) {
2734 		ret = -EAGAIN;
2735 		goto out_unlock;
2736 	}
2737 
2738 	unmap_folio(folio);
2739 
2740 	/* block interrupt reentry in xa_lock and spinlock */
2741 	local_irq_disable();
2742 	if (mapping) {
2743 		/*
2744 		 * Check if the folio is present in page cache.
2745 		 * We assume all tail pages are present too, if the folio is there.
2746 		 */
2747 		xas_lock(&xas);
2748 		xas_reset(&xas);
2749 		if (xas_load(&xas) != folio)
2750 			goto fail;
2751 	}
2752 
2753 	/* Prevent deferred_split_scan() touching ->_refcount */
2754 	spin_lock(&ds_queue->split_queue_lock);
2755 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
2756 		if (!list_empty(&folio->_deferred_list)) {
2757 			ds_queue->split_queue_len--;
2758 			list_del(&folio->_deferred_list);
2759 		}
2760 		spin_unlock(&ds_queue->split_queue_lock);
2761 		if (mapping) {
2762 			int nr = folio_nr_pages(folio);
2763 
2764 			xas_split(&xas, folio, folio_order(folio));
2765 			if (folio_test_swapbacked(folio)) {
2766 				__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
2767 							-nr);
2768 			} else {
2769 				__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
2770 							-nr);
2771 				filemap_nr_thps_dec(mapping);
2772 			}
2773 		}
2774 
2775 		__split_huge_page(page, list, end);
2776 		ret = 0;
2777 	} else {
2778 		spin_unlock(&ds_queue->split_queue_lock);
2779 fail:
2780 		if (mapping)
2781 			xas_unlock(&xas);
2782 		local_irq_enable();
2783 		remap_page(folio, folio_nr_pages(folio));
2784 		ret = -EAGAIN;
2785 	}
2786 
2787 out_unlock:
2788 	if (anon_vma) {
2789 		anon_vma_unlock_write(anon_vma);
2790 		put_anon_vma(anon_vma);
2791 	}
2792 	if (mapping)
2793 		i_mmap_unlock_read(mapping);
2794 out:
2795 	xas_destroy(&xas);
2796 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2797 	return ret;
2798 }
2799 
2800 void folio_undo_large_rmappable(struct folio *folio)
2801 {
2802 	struct deferred_split *ds_queue;
2803 	unsigned long flags;
2804 
2805 	/*
2806 	 * At this point, there is no one trying to add the folio to the
2807 	 * deferred_list. If the folio is not on the deferred_list, it's
2808 	 * safe to check without acquiring the split_queue_lock.
2809 	 */
2810 	if (data_race(list_empty(&folio->_deferred_list)))
2811 		return;
2812 
2813 	ds_queue = get_deferred_split_queue(folio);
2814 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2815 	if (!list_empty(&folio->_deferred_list)) {
2816 		ds_queue->split_queue_len--;
2817 		list_del(&folio->_deferred_list);
2818 	}
2819 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2820 }
2821 
2822 void deferred_split_folio(struct folio *folio)
2823 {
2824 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2825 #ifdef CONFIG_MEMCG
2826 	struct mem_cgroup *memcg = folio_memcg(folio);
2827 #endif
2828 	unsigned long flags;
2829 
2830 	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
2831 
2832 	/*
2833 	 * The try_to_unmap() in the page reclaim path might reach here
2834 	 * too; racing with it could corrupt the deferred split queue.
2835 	 * Also, if page reclaim is already handling the same folio, it
2836 	 * is unnecessary to handle it again in the shrinker.
2837 	 *
2838 	 * Check the swapcache flag to determine if the folio is being
2839 	 * handled by page reclaim since THP swap would add the folio into
2840 	 * swap cache before calling try_to_unmap().
2841 	 */
2842 	if (folio_test_swapcache(folio))
2843 		return;
2844 
2845 	if (!list_empty(&folio->_deferred_list))
2846 		return;
2847 
2848 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2849 	if (list_empty(&folio->_deferred_list)) {
2850 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2851 		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
2852 		ds_queue->split_queue_len++;
2853 #ifdef CONFIG_MEMCG
2854 		if (memcg)
2855 			set_shrinker_bit(memcg, folio_nid(folio),
2856 					 deferred_split_shrinker->id);
2857 #endif
2858 	}
2859 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2860 }
2861 
2862 static unsigned long deferred_split_count(struct shrinker *shrink,
2863 		struct shrink_control *sc)
2864 {
2865 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2866 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2867 
2868 #ifdef CONFIG_MEMCG
2869 	if (sc->memcg)
2870 		ds_queue = &sc->memcg->deferred_split_queue;
2871 #endif
2872 	return READ_ONCE(ds_queue->split_queue_len);
2873 }
2874 
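/*
 * Shrinker scan callback: under the queue lock, take a reference on up to
 * sc->nr_to_scan folios and move them to a private list; then, with the
 * lock dropped, try to lock and split each one. Folios that could not be
 * split are spliced back onto the tail of the queue.
 */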
2875 static unsigned long deferred_split_scan(struct shrinker *shrink,
2876 		struct shrink_control *sc)
2877 {
2878 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2879 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2880 	unsigned long flags;
2881 	LIST_HEAD(list);
2882 	struct folio *folio, *next;
2883 	int split = 0;
2884 
2885 #ifdef CONFIG_MEMCG
2886 	if (sc->memcg)
2887 		ds_queue = &sc->memcg->deferred_split_queue;
2888 #endif
2889 
2890 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2891 	/* Take pin on all head pages to avoid freeing them under us */
2892 	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
2893 							_deferred_list) {
2894 		if (folio_try_get(folio)) {
2895 			list_move(&folio->_deferred_list, &list);
2896 		} else {
2897 			/* We lost race with folio_put() */
2898 			list_del_init(&folio->_deferred_list);
2899 			ds_queue->split_queue_len--;
2900 		}
2901 		if (!--sc->nr_to_scan)
2902 			break;
2903 	}
2904 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2905 
2906 	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
2907 		if (!folio_trylock(folio))
2908 			goto next;
2909 		/* split_huge_page() removes page from list on success */
2910 		if (!split_folio(folio))
2911 			split++;
2912 		folio_unlock(folio);
2913 next:
2914 		folio_put(folio);
2915 	}
2916 
2917 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2918 	list_splice_tail(&list, &ds_queue->split_queue);
2919 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2920 
2921 	/*
2922 	 * Stop the shrinker if we didn't split any page and the queue is empty.
2923 	 * This can happen if pages were freed under us.
2924 	 */
2925 	if (!split && list_empty(&ds_queue->split_queue))
2926 		return SHRINK_STOP;
2927 	return split;
2928 }
2929 
2930 #ifdef CONFIG_DEBUG_FS
2931 static void split_huge_pages_all(void)
2932 {
2933 	struct zone *zone;
2934 	struct page *page;
2935 	struct folio *folio;
2936 	unsigned long pfn, max_zone_pfn;
2937 	unsigned long total = 0, split = 0;
2938 
2939 	pr_debug("Split all THPs\n");
2940 	for_each_zone(zone) {
2941 		if (!managed_zone(zone))
2942 			continue;
2943 		max_zone_pfn = zone_end_pfn(zone);
2944 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
2945 			int nr_pages;
2946 
2947 			page = pfn_to_online_page(pfn);
2948 			if (!page || PageTail(page))
2949 				continue;
2950 			folio = page_folio(page);
2951 			if (!folio_try_get(folio))
2952 				continue;
2953 
2954 			if (unlikely(page_folio(page) != folio))
2955 				goto next;
2956 
2957 			if (zone != folio_zone(folio))
2958 				goto next;
2959 
2960 			if (!folio_test_large(folio)
2961 				|| folio_test_hugetlb(folio)
2962 				|| !folio_test_lru(folio))
2963 				goto next;
2964 
2965 			total++;
2966 			folio_lock(folio);
2967 			nr_pages = folio_nr_pages(folio);
2968 			if (!split_folio(folio))
2969 				split++;
2970 			pfn += nr_pages - 1;
2971 			folio_unlock(folio);
2972 next:
2973 			folio_put(folio);
2974 			cond_resched();
2975 		}
2976 	}
2977 
2978 	pr_debug("%lu of %lu THP split\n", split, total);
2979 }
2980 
2981 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
2982 {
2983 	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
2984 		    is_vm_hugetlb_page(vma);
2985 }
2986 
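/*
 * Debugfs helper: walk [vaddr_start, vaddr_end) of the process identified
 * by @pid and try to split every THP mapped there. The walk advances page
 * by page, since one pte page table may map several distinct PTE-mapped
 * THPs.
 */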
2987 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
2988 				unsigned long vaddr_end)
2989 {
2990 	int ret = 0;
2991 	struct task_struct *task;
2992 	struct mm_struct *mm;
2993 	unsigned long total = 0, split = 0;
2994 	unsigned long addr;
2995 
2996 	vaddr_start &= PAGE_MASK;
2997 	vaddr_end &= PAGE_MASK;
2998 
2999 	/* Find the task_struct from pid */
3000 	rcu_read_lock();
3001 	task = find_task_by_vpid(pid);
3002 	if (!task) {
3003 		rcu_read_unlock();
3004 		ret = -ESRCH;
3005 		goto out;
3006 	}
3007 	get_task_struct(task);
3008 	rcu_read_unlock();
3009 
3010 	/* Find the mm_struct */
3011 	mm = get_task_mm(task);
3012 	put_task_struct(task);
3013 
3014 	if (!mm) {
3015 		ret = -EINVAL;
3016 		goto out;
3017 	}
3018 
3019 	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3020 		 pid, vaddr_start, vaddr_end);
3021 
3022 	mmap_read_lock(mm);
3023 	/*
3024 	 * Always increase addr by PAGE_SIZE, since we could have a PTE page
3025 	 * table filled with PTE-mapped THPs, each of which is distinct.
3026 	 */
3027 	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3028 		struct vm_area_struct *vma = vma_lookup(mm, addr);
3029 		struct page *page;
3030 		struct folio *folio;
3031 
3032 		if (!vma)
3033 			break;
3034 
3035 		/* skip special VMA and hugetlb VMA */
3036 		if (vma_not_suitable_for_thp_split(vma)) {
3037 			addr = vma->vm_end;
3038 			continue;
3039 		}
3040 
3041 		/* FOLL_DUMP to ignore special (like zero) pages */
3042 		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3043 
3044 		if (IS_ERR_OR_NULL(page))
3045 			continue;
3046 
3047 		folio = page_folio(page);
3048 		if (!is_transparent_hugepage(folio))
3049 			goto next;
3050 
3051 		total++;
3052 		if (!can_split_folio(folio, NULL))
3053 			goto next;
3054 
3055 		if (!folio_trylock(folio))
3056 			goto next;
3057 
3058 		if (!split_folio(folio))
3059 			split++;
3060 
3061 		folio_unlock(folio);
3062 next:
3063 		folio_put(folio);
3064 		cond_resched();
3065 	}
3066 	mmap_read_unlock(mm);
3067 	mmput(mm);
3068 
3069 	pr_debug("%lu of %lu THP split\n", split, total);
3070 
3071 out:
3072 	return ret;
3073 }
3074 
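/*
 * Debugfs helper: open @file_path read-only and try to split every large
 * folio found in its page cache within [off_start, off_end).
 */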
3075 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3076 				pgoff_t off_end)
3077 {
3078 	struct filename *file;
3079 	struct file *candidate;
3080 	struct address_space *mapping;
3081 	int ret = -EINVAL;
3082 	pgoff_t index;
3083 	int nr_pages = 1;
3084 	unsigned long total = 0, split = 0;
3085 
3086 	file = getname_kernel(file_path);
3087 	if (IS_ERR(file))
3088 		return ret;
3089 
3090 	candidate = file_open_name(file, O_RDONLY, 0);
3091 	if (IS_ERR(candidate))
3092 		goto out;
3093 
3094 	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3095 		 file_path, off_start, off_end);
3096 
3097 	mapping = candidate->f_mapping;
3098 
3099 	for (index = off_start; index < off_end; index += nr_pages) {
3100 		struct folio *folio = filemap_get_folio(mapping, index);
3101 
3102 		nr_pages = 1;
3103 		if (IS_ERR(folio))
3104 			continue;
3105 
3106 		if (!folio_test_large(folio))
3107 			goto next;
3108 
3109 		total++;
3110 		nr_pages = folio_nr_pages(folio);
3111 
3112 		if (!folio_trylock(folio))
3113 			goto next;
3114 
3115 		if (!split_folio(folio))
3116 			split++;
3117 
3118 		folio_unlock(folio);
3119 next:
3120 		folio_put(folio);
3121 		cond_resched();
3122 	}
3123 
3124 	filp_close(candidate, NULL);
3125 	ret = 0;
3126 
3127 	pr_debug("%lu of %lu file-backed THP split\n", split, total);
3128 out:
3129 	putname(file);
3130 	return ret;
3131 }
3132 
3133 #define MAX_INPUT_BUF_SZ 255
3134 
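/*
 * Write handler for <debugfs>/split_huge_pages. Three input formats are
 * accepted, matching the sscanf() calls below (addresses and offsets in
 * hex); for example, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 1 > split_huge_pages
 *		split all THPs in the system
 *	echo "<pid>,0x<vaddr_start>,0x<vaddr_end>" > split_huge_pages
 *		split the THPs in that virtual address range of <pid>
 *	echo "/path/to/file,0x<off_start>,0x<off_end>" > split_huge_pages
 *		split the file-backed THPs in that page offset range
 */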
3135 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3136 				size_t count, loff_t *ppos)
3137 {
3138 	static DEFINE_MUTEX(split_debug_mutex);
3139 	ssize_t ret;
3140 	/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
3141 	char input_buf[MAX_INPUT_BUF_SZ];
3142 	int pid;
3143 	unsigned long vaddr_start, vaddr_end;
3144 
3145 	ret = mutex_lock_interruptible(&split_debug_mutex);
3146 	if (ret)
3147 		return ret;
3148 
3149 	ret = -EFAULT;
3150 
3151 	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3152 	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3153 		goto out;
3154 
3155 	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3156 
3157 	if (input_buf[0] == '/') {
3158 		char *tok;
3159 		char *buf = input_buf;
3160 		char file_path[MAX_INPUT_BUF_SZ];
3161 		pgoff_t off_start = 0, off_end = 0;
3162 		size_t input_len = strlen(input_buf);
3163 
3164 		tok = strsep(&buf, ",");
3165 		if (tok) {
3166 			strcpy(file_path, tok);
3167 		} else {
3168 			ret = -EINVAL;
3169 			goto out;
3170 		}
3171 
3172 		ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3173 		if (ret != 2) {
3174 			ret = -EINVAL;
3175 			goto out;
3176 		}
3177 		ret = split_huge_pages_in_file(file_path, off_start, off_end);
3178 		if (!ret)
3179 			ret = input_len;
3180 
3181 		goto out;
3182 	}
3183 
3184 	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3185 	if (ret == 1 && pid == 1) {
3186 		split_huge_pages_all();
3187 		ret = strlen(input_buf);
3188 		goto out;
3189 	} else if (ret != 3) {
3190 		ret = -EINVAL;
3191 		goto out;
3192 	}
3193 
3194 	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3195 	if (!ret)
3196 		ret = strlen(input_buf);
3197 out:
3198 	mutex_unlock(&split_debug_mutex);
3199 	return ret;
3200 
3201 }
3202 
3203 static const struct file_operations split_huge_pages_fops = {
3204 	.owner	 = THIS_MODULE,
3205 	.write	 = split_huge_pages_write,
3206 	.llseek  = no_llseek,
3207 };
3208 
3209 static int __init split_huge_pages_debugfs(void)
3210 {
3211 	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3212 			    &split_huge_pages_fops);
3213 	return 0;
3214 }
3215 late_initcall(split_huge_pages_debugfs);
3216 #endif
3217 
3218 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
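/*
 * Replace a present huge pmd mapping @page with a pmd-level migration
 * entry, carrying over the write/young/dirty/soft-dirty/uffd-wp bits.
 * Returns 0 on success (or when @pvmw does not map a huge pmd), and
 * -EBUSY (after re-installing the old pmd) if anon exclusivity could not
 * be shared; see page_try_share_anon_rmap().
 */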
3219 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3220 		struct page *page)
3221 {
3222 	struct vm_area_struct *vma = pvmw->vma;
3223 	struct mm_struct *mm = vma->vm_mm;
3224 	unsigned long address = pvmw->address;
3225 	bool anon_exclusive;
3226 	pmd_t pmdval;
3227 	swp_entry_t entry;
3228 	pmd_t pmdswp;
3229 
3230 	if (!(pvmw->pmd && !pvmw->pte))
3231 		return 0;
3232 
3233 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3234 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3235 
3236 	/* See page_try_share_anon_rmap(): invalidate PMD first. */
3237 	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
3238 	if (anon_exclusive && page_try_share_anon_rmap(page)) {
3239 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
3240 		return -EBUSY;
3241 	}
3242 
3243 	if (pmd_dirty(pmdval))
3244 		set_page_dirty(page);
3245 	if (pmd_write(pmdval))
3246 		entry = make_writable_migration_entry(page_to_pfn(page));
3247 	else if (anon_exclusive)
3248 		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3249 	else
3250 		entry = make_readable_migration_entry(page_to_pfn(page));
3251 	if (pmd_young(pmdval))
3252 		entry = make_migration_entry_young(entry);
3253 	if (pmd_dirty(pmdval))
3254 		entry = make_migration_entry_dirty(entry);
3255 	pmdswp = swp_entry_to_pmd(entry);
3256 	if (pmd_soft_dirty(pmdval))
3257 		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3258 	if (pmd_uffd_wp(pmdval))
3259 		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
3260 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3261 	page_remove_rmap(page, vma, true);
3262 	put_page(page);
3263 	trace_set_migration_pmd(address, pmd_val(pmdswp));
3264 
3265 	return 0;
3266 }
3267 
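/*
 * Once migration completes, replace the pmd migration entry at @pvmw with
 * a present huge pmd mapping @new, re-establishing the rmap and restoring
 * the recorded write/young/dirty/soft-dirty/uffd-wp state.
 */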
3268 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3269 {
3270 	struct vm_area_struct *vma = pvmw->vma;
3271 	struct mm_struct *mm = vma->vm_mm;
3272 	unsigned long address = pvmw->address;
3273 	unsigned long haddr = address & HPAGE_PMD_MASK;
3274 	pmd_t pmde;
3275 	swp_entry_t entry;
3276 
3277 	if (!(pvmw->pmd && !pvmw->pte))
3278 		return;
3279 
3280 	entry = pmd_to_swp_entry(*pvmw->pmd);
3281 	get_page(new);
3282 	pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
3283 	if (pmd_swp_soft_dirty(*pvmw->pmd))
3284 		pmde = pmd_mksoft_dirty(pmde);
3285 	if (is_writable_migration_entry(entry))
3286 		pmde = pmd_mkwrite(pmde, vma);
3287 	if (pmd_swp_uffd_wp(*pvmw->pmd))
3288 		pmde = pmd_mkuffd_wp(pmde);
3289 	if (!is_migration_entry_young(entry))
3290 		pmde = pmd_mkold(pmde);
3291 	/* NOTE: this may contain setting soft-dirty on some archs */
3292 	/* NOTE: this may also set soft-dirty on some archs */
3293 		pmde = pmd_mkdirty(pmde);
3294 
3295 	if (PageAnon(new)) {
3296 		rmap_t rmap_flags = RMAP_COMPOUND;
3297 
3298 		if (!is_readable_migration_entry(entry))
3299 			rmap_flags |= RMAP_EXCLUSIVE;
3300 
3301 		page_add_anon_rmap(new, vma, haddr, rmap_flags);
3302 	} else {
3303 		page_add_file_rmap(new, vma, true);
3304 	}
3305 	VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
3306 	set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3307 
3308 	/* No need to invalidate - it was non-present before */
3309 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
3310 	trace_remove_migration_pmd(address, pmd_val(pmde));
3311 }
3312 #endif
3313