xref: /linux/mm/huge_memory.c (revision 61a1a9906f66bd0eaaf9bade96f22a60f04240b7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2009  Red Hat, Inc.
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/coredump.h>
12 #include <linux/sched/numa_balancing.h>
13 #include <linux/highmem.h>
14 #include <linux/hugetlb.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/shrinker.h>
19 #include <linux/mm_inline.h>
20 #include <linux/swapops.h>
21 #include <linux/backing-dev.h>
22 #include <linux/dax.h>
23 #include <linux/khugepaged.h>
24 #include <linux/freezer.h>
25 #include <linux/pfn_t.h>
26 #include <linux/mman.h>
27 #include <linux/memremap.h>
28 #include <linux/pagemap.h>
29 #include <linux/debugfs.h>
30 #include <linux/migrate.h>
31 #include <linux/hashtable.h>
32 #include <linux/userfaultfd_k.h>
33 #include <linux/page_idle.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/oom.h>
36 #include <linux/numa.h>
37 #include <linux/page_owner.h>
38 #include <linux/sched/sysctl.h>
39 #include <linux/memory-tiers.h>
40 
41 #include <asm/tlb.h>
42 #include <asm/pgalloc.h>
43 #include "internal.h"
44 #include "swap.h"
45 
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/thp.h>
48 
49 /*
50  * By default, transparent hugepage support is disabled in order to avoid
51  * risking an increased memory footprint for applications that are not
52  * guaranteed to benefit from it. When transparent hugepage support is
53  * enabled, it is for all mappings, and khugepaged scans all mappings.
54  * Defrag is invoked by khugepaged hugepage allocations and by page faults
55  * for all hugepage allocations.
56  */
57 unsigned long transparent_hugepage_flags __read_mostly =
58 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
59 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
60 #endif
61 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
62 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
63 #endif
64 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
65 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
66 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
67 
68 static struct shrinker deferred_split_shrinker;
69 
70 static atomic_t huge_zero_refcount;
71 struct page *huge_zero_page __read_mostly;
72 unsigned long huge_zero_pfn __read_mostly = ~0UL;
73 
74 bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
75 			bool smaps, bool in_pf, bool enforce_sysfs)
76 {
77 	if (!vma->vm_mm)		/* vdso */
78 		return false;
79 
80 	/*
81 	 * Explicitly disabled through madvise or prctl, or some
82 	 * architectures may disable THP for some mappings, for
83 	 * example, s390 kvm.
84 	 * */
85 	if ((vm_flags & VM_NOHUGEPAGE) ||
86 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
87 		return false;
88 	/*
89 	 * If the hardware/firmware marked hugepage support disabled.
90 	 */
91 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
92 		return false;
93 
94 	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
95 	if (vma_is_dax(vma))
96 		return in_pf;
97 
98 	/*
99 	 * Special VMA and hugetlb VMA.
100 	 * Must be checked after dax since some dax mappings may have
101 	 * VM_MIXEDMAP set.
102 	 */
103 	if (vm_flags & VM_NO_KHUGEPAGED)
104 		return false;
105 
106 	/*
107 	 * Check alignment for file vma and size for both file and anon vma.
108 	 *
109 	 * Skip the check for page fault. Huge fault does the check in fault
110 	 * handlers. And this check is not suitable for huge PUD fault.
111 	 */
112 	if (!in_pf &&
113 	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
114 		return false;
115 
116 	/*
117 	 * Enabled via shmem mount options or sysfs settings.
118 	 * Must be done before hugepage flags check since shmem has its
119 	 * own flags.
120 	 */
121 	if (!in_pf && shmem_file(vma->vm_file))
122 		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
123 				     !enforce_sysfs, vma->vm_mm, vm_flags);
124 
125 	/* Enforce sysfs THP requirements as necessary */
126 	if (enforce_sysfs &&
127 	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
128 					   !hugepage_flags_always())))
129 		return false;
130 
131 	/* Only regular file is valid */
132 	if (!in_pf && file_thp_enabled(vma))
133 		return true;
134 
135 	if (!vma_is_anonymous(vma))
136 		return false;
137 
138 	if (vma_is_temporary_stack(vma))
139 		return false;
140 
141 	/*
142 	 * THPeligible bit of smaps should show 1 for proper VMAs even
143 	 * though anon_vma is not initialized yet.
144 	 *
145 	 * Allow page fault since anon_vma may be not initialized until
146 	 * the first page fault.
147 	 */
148 	if (!vma->anon_vma)
149 		return (smaps || in_pf);
150 
151 	return true;
152 }
153 
154 static bool get_huge_zero_page(void)
155 {
156 	struct page *zero_page;
157 retry:
158 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
159 		return true;
160 
161 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
162 			HPAGE_PMD_ORDER);
163 	if (!zero_page) {
164 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
165 		return false;
166 	}
167 	preempt_disable();
168 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
169 		preempt_enable();
170 		__free_pages(zero_page, compound_order(zero_page));
171 		goto retry;
172 	}
173 	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
174 
175 	/* We take additional reference here. It will be put back by shrinker */
176 	atomic_set(&huge_zero_refcount, 2);
177 	preempt_enable();
178 	count_vm_event(THP_ZERO_PAGE_ALLOC);
179 	return true;
180 }
181 
182 static void put_huge_zero_page(void)
183 {
184 	/*
185 	 * Counter should never go to zero here. Only shrinker can put
186 	 * last reference.
187 	 */
188 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
189 }
190 
191 struct page *mm_get_huge_zero_page(struct mm_struct *mm)
192 {
193 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
194 		return READ_ONCE(huge_zero_page);
195 
196 	if (!get_huge_zero_page())
197 		return NULL;
198 
199 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
200 		put_huge_zero_page();
201 
202 	return READ_ONCE(huge_zero_page);
203 }
204 
205 void mm_put_huge_zero_page(struct mm_struct *mm)
206 {
207 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
208 		put_huge_zero_page();
209 }
210 
211 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
212 					struct shrink_control *sc)
213 {
214 	/* we can free zero page only if last reference remains */
215 	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
216 }
217 
218 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
219 				       struct shrink_control *sc)
220 {
221 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
222 		struct page *zero_page = xchg(&huge_zero_page, NULL);
223 		BUG_ON(zero_page == NULL);
224 		WRITE_ONCE(huge_zero_pfn, ~0UL);
225 		__free_pages(zero_page, compound_order(zero_page));
226 		return HPAGE_PMD_NR;
227 	}
228 
229 	return 0;
230 }
231 
232 static struct shrinker huge_zero_page_shrinker = {
233 	.count_objects = shrink_huge_zero_page_count,
234 	.scan_objects = shrink_huge_zero_page_scan,
235 	.seeks = DEFAULT_SEEKS,
236 };
237 
238 #ifdef CONFIG_SYSFS
239 static ssize_t enabled_show(struct kobject *kobj,
240 			    struct kobj_attribute *attr, char *buf)
241 {
242 	const char *output;
243 
244 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
245 		output = "[always] madvise never";
246 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
247 			  &transparent_hugepage_flags))
248 		output = "always [madvise] never";
249 	else
250 		output = "always madvise [never]";
251 
252 	return sysfs_emit(buf, "%s\n", output);
253 }
254 
255 static ssize_t enabled_store(struct kobject *kobj,
256 			     struct kobj_attribute *attr,
257 			     const char *buf, size_t count)
258 {
259 	ssize_t ret = count;
260 
261 	if (sysfs_streq(buf, "always")) {
262 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
263 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
264 	} else if (sysfs_streq(buf, "madvise")) {
265 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
266 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
267 	} else if (sysfs_streq(buf, "never")) {
268 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
269 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
270 	} else
271 		ret = -EINVAL;
272 
273 	if (ret > 0) {
274 		int err = start_stop_khugepaged();
275 		if (err)
276 			ret = err;
277 	}
278 	return ret;
279 }
280 
281 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
282 
283 ssize_t single_hugepage_flag_show(struct kobject *kobj,
284 				  struct kobj_attribute *attr, char *buf,
285 				  enum transparent_hugepage_flag flag)
286 {
287 	return sysfs_emit(buf, "%d\n",
288 			  !!test_bit(flag, &transparent_hugepage_flags));
289 }
290 
291 ssize_t single_hugepage_flag_store(struct kobject *kobj,
292 				 struct kobj_attribute *attr,
293 				 const char *buf, size_t count,
294 				 enum transparent_hugepage_flag flag)
295 {
296 	unsigned long value;
297 	int ret;
298 
299 	ret = kstrtoul(buf, 10, &value);
300 	if (ret < 0)
301 		return ret;
302 	if (value > 1)
303 		return -EINVAL;
304 
305 	if (value)
306 		set_bit(flag, &transparent_hugepage_flags);
307 	else
308 		clear_bit(flag, &transparent_hugepage_flags);
309 
310 	return count;
311 }
312 
313 static ssize_t defrag_show(struct kobject *kobj,
314 			   struct kobj_attribute *attr, char *buf)
315 {
316 	const char *output;
317 
318 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
319 		     &transparent_hugepage_flags))
320 		output = "[always] defer defer+madvise madvise never";
321 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
322 			  &transparent_hugepage_flags))
323 		output = "always [defer] defer+madvise madvise never";
324 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
325 			  &transparent_hugepage_flags))
326 		output = "always defer [defer+madvise] madvise never";
327 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
328 			  &transparent_hugepage_flags))
329 		output = "always defer defer+madvise [madvise] never";
330 	else
331 		output = "always defer defer+madvise madvise [never]";
332 
333 	return sysfs_emit(buf, "%s\n", output);
334 }
335 
336 static ssize_t defrag_store(struct kobject *kobj,
337 			    struct kobj_attribute *attr,
338 			    const char *buf, size_t count)
339 {
340 	if (sysfs_streq(buf, "always")) {
341 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
342 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
343 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
344 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
345 	} else if (sysfs_streq(buf, "defer+madvise")) {
346 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
347 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
348 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
349 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
350 	} else if (sysfs_streq(buf, "defer")) {
351 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
352 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
353 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
354 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
355 	} else if (sysfs_streq(buf, "madvise")) {
356 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
357 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
358 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
359 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
360 	} else if (sysfs_streq(buf, "never")) {
361 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
362 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
363 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
364 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
365 	} else
366 		return -EINVAL;
367 
368 	return count;
369 }
370 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
371 
372 static ssize_t use_zero_page_show(struct kobject *kobj,
373 				  struct kobj_attribute *attr, char *buf)
374 {
375 	return single_hugepage_flag_show(kobj, attr, buf,
376 					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
377 }
378 static ssize_t use_zero_page_store(struct kobject *kobj,
379 		struct kobj_attribute *attr, const char *buf, size_t count)
380 {
381 	return single_hugepage_flag_store(kobj, attr, buf, count,
382 				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
383 }
384 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
385 
386 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
387 				   struct kobj_attribute *attr, char *buf)
388 {
389 	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
390 }
391 static struct kobj_attribute hpage_pmd_size_attr =
392 	__ATTR_RO(hpage_pmd_size);
393 
394 static struct attribute *hugepage_attr[] = {
395 	&enabled_attr.attr,
396 	&defrag_attr.attr,
397 	&use_zero_page_attr.attr,
398 	&hpage_pmd_size_attr.attr,
399 #ifdef CONFIG_SHMEM
400 	&shmem_enabled_attr.attr,
401 #endif
402 	NULL,
403 };
404 
405 static const struct attribute_group hugepage_attr_group = {
406 	.attrs = hugepage_attr,
407 };
408 
409 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
410 {
411 	int err;
412 
413 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
414 	if (unlikely(!*hugepage_kobj)) {
415 		pr_err("failed to create transparent hugepage kobject\n");
416 		return -ENOMEM;
417 	}
418 
419 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
420 	if (err) {
421 		pr_err("failed to register transparent hugepage group\n");
422 		goto delete_obj;
423 	}
424 
425 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
426 	if (err) {
427 		pr_err("failed to register transparent hugepage group\n");
428 		goto remove_hp_group;
429 	}
430 
431 	return 0;
432 
433 remove_hp_group:
434 	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
435 delete_obj:
436 	kobject_put(*hugepage_kobj);
437 	return err;
438 }
439 
440 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
441 {
442 	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
443 	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
444 	kobject_put(hugepage_kobj);
445 }
446 #else
447 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
448 {
449 	return 0;
450 }
451 
452 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
453 {
454 }
455 #endif /* CONFIG_SYSFS */
456 
457 static int __init hugepage_init(void)
458 {
459 	int err;
460 	struct kobject *hugepage_kobj;
461 
462 	if (!has_transparent_hugepage()) {
463 		/*
464 		 * Hardware doesn't support hugepages, hence disable
465 		 * DAX PMD support.
466 		 */
467 		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
468 		return -EINVAL;
469 	}
470 
471 	/*
472 	 * hugepages can't be allocated by the buddy allocator
473 	 */
474 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
475 	/*
476 	 * we use page->mapping and page->index in second tail page
477 	 * as list_head: assuming THP order >= 2
478 	 */
479 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
480 
481 	err = hugepage_init_sysfs(&hugepage_kobj);
482 	if (err)
483 		goto err_sysfs;
484 
485 	err = khugepaged_init();
486 	if (err)
487 		goto err_slab;
488 
489 	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
490 	if (err)
491 		goto err_hzp_shrinker;
492 	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
493 	if (err)
494 		goto err_split_shrinker;
495 
496 	/*
497 	 * By default disable transparent hugepages on smaller systems,
498 	 * where the extra memory used could hurt more than TLB overhead
499 	 * is likely to save.  The admin can still enable it through /sys.
500 	 */
501 	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
502 		transparent_hugepage_flags = 0;
503 		return 0;
504 	}
505 
506 	err = start_stop_khugepaged();
507 	if (err)
508 		goto err_khugepaged;
509 
510 	return 0;
511 err_khugepaged:
512 	unregister_shrinker(&deferred_split_shrinker);
513 err_split_shrinker:
514 	unregister_shrinker(&huge_zero_page_shrinker);
515 err_hzp_shrinker:
516 	khugepaged_destroy();
517 err_slab:
518 	hugepage_exit_sysfs(hugepage_kobj);
519 err_sysfs:
520 	return err;
521 }
522 subsys_initcall(hugepage_init);
523 
524 static int __init setup_transparent_hugepage(char *str)
525 {
526 	int ret = 0;
527 	if (!str)
528 		goto out;
529 	if (!strcmp(str, "always")) {
530 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
531 			&transparent_hugepage_flags);
532 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
533 			  &transparent_hugepage_flags);
534 		ret = 1;
535 	} else if (!strcmp(str, "madvise")) {
536 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
537 			  &transparent_hugepage_flags);
538 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
539 			&transparent_hugepage_flags);
540 		ret = 1;
541 	} else if (!strcmp(str, "never")) {
542 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
543 			  &transparent_hugepage_flags);
544 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
545 			  &transparent_hugepage_flags);
546 		ret = 1;
547 	}
548 out:
549 	if (!ret)
550 		pr_warn("transparent_hugepage= cannot parse, ignored\n");
551 	return ret;
552 }
553 __setup("transparent_hugepage=", setup_transparent_hugepage);
554 
555 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
556 {
557 	if (likely(vma->vm_flags & VM_WRITE))
558 		pmd = pmd_mkwrite(pmd);
559 	return pmd;
560 }
561 
562 #ifdef CONFIG_MEMCG
563 static inline
564 struct deferred_split *get_deferred_split_queue(struct folio *folio)
565 {
566 	struct mem_cgroup *memcg = folio_memcg(folio);
567 	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
568 
569 	if (memcg)
570 		return &memcg->deferred_split_queue;
571 	else
572 		return &pgdat->deferred_split_queue;
573 }
574 #else
575 static inline
576 struct deferred_split *get_deferred_split_queue(struct folio *folio)
577 {
578 	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
579 
580 	return &pgdat->deferred_split_queue;
581 }
582 #endif
583 
584 void prep_transhuge_page(struct page *page)
585 {
586 	struct folio *folio = (struct folio *)page;
587 
588 	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
589 	INIT_LIST_HEAD(&folio->_deferred_list);
590 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
591 }
592 
593 static inline bool is_transparent_hugepage(struct page *page)
594 {
595 	struct folio *folio;
596 
597 	if (!PageCompound(page))
598 		return false;
599 
600 	folio = page_folio(page);
601 	return is_huge_zero_page(&folio->page) ||
602 	       folio->_folio_dtor == TRANSHUGE_PAGE_DTOR;
603 }
604 
605 static unsigned long __thp_get_unmapped_area(struct file *filp,
606 		unsigned long addr, unsigned long len,
607 		loff_t off, unsigned long flags, unsigned long size)
608 {
609 	loff_t off_end = off + len;
610 	loff_t off_align = round_up(off, size);
611 	unsigned long len_pad, ret;
612 
613 	if (off_end <= off_align || (off_end - off_align) < size)
614 		return 0;
615 
616 	len_pad = len + size;
617 	if (len_pad < len || (off + len_pad) < off)
618 		return 0;
619 
620 	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
621 					      off >> PAGE_SHIFT, flags);
622 
623 	/*
624 	 * The failure might be due to length padding. The caller will retry
625 	 * without the padding.
626 	 */
627 	if (IS_ERR_VALUE(ret))
628 		return 0;
629 
630 	/*
631 	 * Do not try to align to THP boundary if allocation at the address
632 	 * hint succeeds.
633 	 */
634 	if (ret == addr)
635 		return addr;
636 
637 	ret += (off - ret) & (size - 1);
638 	return ret;
639 }
640 
641 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
642 		unsigned long len, unsigned long pgoff, unsigned long flags)
643 {
644 	unsigned long ret;
645 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
646 
647 	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
648 	if (ret)
649 		return ret;
650 
651 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
652 }
653 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
654 
655 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
656 			struct page *page, gfp_t gfp)
657 {
658 	struct vm_area_struct *vma = vmf->vma;
659 	pgtable_t pgtable;
660 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
661 	vm_fault_t ret = 0;
662 
663 	VM_BUG_ON_PAGE(!PageCompound(page), page);
664 
665 	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
666 		put_page(page);
667 		count_vm_event(THP_FAULT_FALLBACK);
668 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
669 		return VM_FAULT_FALLBACK;
670 	}
671 	cgroup_throttle_swaprate(page, gfp);
672 
673 	pgtable = pte_alloc_one(vma->vm_mm);
674 	if (unlikely(!pgtable)) {
675 		ret = VM_FAULT_OOM;
676 		goto release;
677 	}
678 
679 	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
680 	/*
681 	 * The memory barrier inside __SetPageUptodate makes sure that
682 	 * clear_huge_page writes become visible before the set_pmd_at()
683 	 * write.
684 	 */
685 	__SetPageUptodate(page);
686 
687 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
688 	if (unlikely(!pmd_none(*vmf->pmd))) {
689 		goto unlock_release;
690 	} else {
691 		pmd_t entry;
692 
693 		ret = check_stable_address_space(vma->vm_mm);
694 		if (ret)
695 			goto unlock_release;
696 
697 		/* Deliver the page fault to userland */
698 		if (userfaultfd_missing(vma)) {
699 			spin_unlock(vmf->ptl);
700 			put_page(page);
701 			pte_free(vma->vm_mm, pgtable);
702 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
703 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
704 			return ret;
705 		}
706 
707 		entry = mk_huge_pmd(page, vma->vm_page_prot);
708 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
709 		page_add_new_anon_rmap(page, vma, haddr);
710 		lru_cache_add_inactive_or_unevictable(page, vma);
711 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
712 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
713 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
714 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
715 		mm_inc_nr_ptes(vma->vm_mm);
716 		spin_unlock(vmf->ptl);
717 		count_vm_event(THP_FAULT_ALLOC);
718 		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
719 	}
720 
721 	return 0;
722 unlock_release:
723 	spin_unlock(vmf->ptl);
724 release:
725 	if (pgtable)
726 		pte_free(vma->vm_mm, pgtable);
727 	put_page(page);
728 	return ret;
729 
730 }
731 
732 /*
733  * always: directly stall for all thp allocations
734  * defer: wake kswapd and fail if not immediately available
735  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
736  *		  fail if not immediately available
737  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
738  *	    available
739  * never: never stall for any thp allocation
740  */
741 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
742 {
743 	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
744 
745 	/* Always do synchronous compaction */
746 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
747 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
748 
749 	/* Kick kcompactd and fail quickly */
750 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
751 		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
752 
753 	/* Synchronous compaction if madvised, otherwise kick kcompactd */
754 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
755 		return GFP_TRANSHUGE_LIGHT |
756 			(vma_madvised ? __GFP_DIRECT_RECLAIM :
757 					__GFP_KSWAPD_RECLAIM);
758 
759 	/* Only do synchronous compaction if madvised */
760 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
761 		return GFP_TRANSHUGE_LIGHT |
762 		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
763 
764 	return GFP_TRANSHUGE_LIGHT;
765 }
766 
767 /* Caller must hold page table lock. */
768 static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
769 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
770 		struct page *zero_page)
771 {
772 	pmd_t entry;
773 	if (!pmd_none(*pmd))
774 		return;
775 	entry = mk_pmd(zero_page, vma->vm_page_prot);
776 	entry = pmd_mkhuge(entry);
777 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
778 	set_pmd_at(mm, haddr, pmd, entry);
779 	mm_inc_nr_ptes(mm);
780 }
781 
782 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
783 {
784 	struct vm_area_struct *vma = vmf->vma;
785 	gfp_t gfp;
786 	struct folio *folio;
787 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
788 
789 	if (!transhuge_vma_suitable(vma, haddr))
790 		return VM_FAULT_FALLBACK;
791 	if (unlikely(anon_vma_prepare(vma)))
792 		return VM_FAULT_OOM;
793 	khugepaged_enter_vma(vma, vma->vm_flags);
794 
795 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
796 			!mm_forbids_zeropage(vma->vm_mm) &&
797 			transparent_hugepage_use_zero_page()) {
798 		pgtable_t pgtable;
799 		struct page *zero_page;
800 		vm_fault_t ret;
801 		pgtable = pte_alloc_one(vma->vm_mm);
802 		if (unlikely(!pgtable))
803 			return VM_FAULT_OOM;
804 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
805 		if (unlikely(!zero_page)) {
806 			pte_free(vma->vm_mm, pgtable);
807 			count_vm_event(THP_FAULT_FALLBACK);
808 			return VM_FAULT_FALLBACK;
809 		}
810 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
811 		ret = 0;
812 		if (pmd_none(*vmf->pmd)) {
813 			ret = check_stable_address_space(vma->vm_mm);
814 			if (ret) {
815 				spin_unlock(vmf->ptl);
816 				pte_free(vma->vm_mm, pgtable);
817 			} else if (userfaultfd_missing(vma)) {
818 				spin_unlock(vmf->ptl);
819 				pte_free(vma->vm_mm, pgtable);
820 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
821 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
822 			} else {
823 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
824 						   haddr, vmf->pmd, zero_page);
825 				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
826 				spin_unlock(vmf->ptl);
827 			}
828 		} else {
829 			spin_unlock(vmf->ptl);
830 			pte_free(vma->vm_mm, pgtable);
831 		}
832 		return ret;
833 	}
834 	gfp = vma_thp_gfp_mask(vma);
835 	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
836 	if (unlikely(!folio)) {
837 		count_vm_event(THP_FAULT_FALLBACK);
838 		return VM_FAULT_FALLBACK;
839 	}
840 	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
841 }
842 
843 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
844 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
845 		pgtable_t pgtable)
846 {
847 	struct mm_struct *mm = vma->vm_mm;
848 	pmd_t entry;
849 	spinlock_t *ptl;
850 
851 	ptl = pmd_lock(mm, pmd);
852 	if (!pmd_none(*pmd)) {
853 		if (write) {
854 			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
855 				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
856 				goto out_unlock;
857 			}
858 			entry = pmd_mkyoung(*pmd);
859 			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
860 			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
861 				update_mmu_cache_pmd(vma, addr, pmd);
862 		}
863 
864 		goto out_unlock;
865 	}
866 
867 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
868 	if (pfn_t_devmap(pfn))
869 		entry = pmd_mkdevmap(entry);
870 	if (write) {
871 		entry = pmd_mkyoung(pmd_mkdirty(entry));
872 		entry = maybe_pmd_mkwrite(entry, vma);
873 	}
874 
875 	if (pgtable) {
876 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
877 		mm_inc_nr_ptes(mm);
878 		pgtable = NULL;
879 	}
880 
881 	set_pmd_at(mm, addr, pmd, entry);
882 	update_mmu_cache_pmd(vma, addr, pmd);
883 
884 out_unlock:
885 	spin_unlock(ptl);
886 	if (pgtable)
887 		pte_free(mm, pgtable);
888 }
889 
890 /**
891  * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
892  * @vmf: Structure describing the fault
893  * @pfn: pfn to insert
894  * @pgprot: page protection to use
895  * @write: whether it's a write fault
896  *
897  * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
898  * also consult the vmf_insert_mixed_prot() documentation when
899  * @pgprot != @vmf->vma->vm_page_prot.
900  *
901  * Return: vm_fault_t value.
902  */
903 vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
904 				   pgprot_t pgprot, bool write)
905 {
906 	unsigned long addr = vmf->address & PMD_MASK;
907 	struct vm_area_struct *vma = vmf->vma;
908 	pgtable_t pgtable = NULL;
909 
910 	/*
911 	 * If we had pmd_special, we could avoid all these restrictions,
912 	 * but we need to be consistent with PTEs and architectures that
913 	 * can't support a 'special' bit.
914 	 */
915 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
916 			!pfn_t_devmap(pfn));
917 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
918 						(VM_PFNMAP|VM_MIXEDMAP));
919 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
920 
921 	if (addr < vma->vm_start || addr >= vma->vm_end)
922 		return VM_FAULT_SIGBUS;
923 
924 	if (arch_needs_pgtable_deposit()) {
925 		pgtable = pte_alloc_one(vma->vm_mm);
926 		if (!pgtable)
927 			return VM_FAULT_OOM;
928 	}
929 
930 	track_pfn_insert(vma, &pgprot, pfn);
931 
932 	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
933 	return VM_FAULT_NOPAGE;
934 }
935 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);
936 
937 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
938 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
939 {
940 	if (likely(vma->vm_flags & VM_WRITE))
941 		pud = pud_mkwrite(pud);
942 	return pud;
943 }
944 
945 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
946 		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
947 {
948 	struct mm_struct *mm = vma->vm_mm;
949 	pud_t entry;
950 	spinlock_t *ptl;
951 
952 	ptl = pud_lock(mm, pud);
953 	if (!pud_none(*pud)) {
954 		if (write) {
955 			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
956 				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
957 				goto out_unlock;
958 			}
959 			entry = pud_mkyoung(*pud);
960 			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
961 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
962 				update_mmu_cache_pud(vma, addr, pud);
963 		}
964 		goto out_unlock;
965 	}
966 
967 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
968 	if (pfn_t_devmap(pfn))
969 		entry = pud_mkdevmap(entry);
970 	if (write) {
971 		entry = pud_mkyoung(pud_mkdirty(entry));
972 		entry = maybe_pud_mkwrite(entry, vma);
973 	}
974 	set_pud_at(mm, addr, pud, entry);
975 	update_mmu_cache_pud(vma, addr, pud);
976 
977 out_unlock:
978 	spin_unlock(ptl);
979 }
980 
981 /**
982  * vmf_insert_pfn_pud_prot - insert a pud size pfn
983  * @vmf: Structure describing the fault
984  * @pfn: pfn to insert
985  * @pgprot: page protection to use
986  * @write: whether it's a write fault
987  *
988  * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
989  * also consult the vmf_insert_mixed_prot() documentation when
990  * @pgprot != @vmf->vma->vm_page_prot.
991  *
992  * Return: vm_fault_t value.
993  */
994 vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
995 				   pgprot_t pgprot, bool write)
996 {
997 	unsigned long addr = vmf->address & PUD_MASK;
998 	struct vm_area_struct *vma = vmf->vma;
999 
1000 	/*
1001 	 * If we had pud_special, we could avoid all these restrictions,
1002 	 * but we need to be consistent with PTEs and architectures that
1003 	 * can't support a 'special' bit.
1004 	 */
1005 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1006 			!pfn_t_devmap(pfn));
1007 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1008 						(VM_PFNMAP|VM_MIXEDMAP));
1009 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1010 
1011 	if (addr < vma->vm_start || addr >= vma->vm_end)
1012 		return VM_FAULT_SIGBUS;
1013 
1014 	track_pfn_insert(vma, &pgprot, pfn);
1015 
1016 	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
1017 	return VM_FAULT_NOPAGE;
1018 }
1019 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
1020 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1021 
1022 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1023 		      pmd_t *pmd, bool write)
1024 {
1025 	pmd_t _pmd;
1026 
1027 	_pmd = pmd_mkyoung(*pmd);
1028 	if (write)
1029 		_pmd = pmd_mkdirty(_pmd);
1030 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1031 				  pmd, _pmd, write))
1032 		update_mmu_cache_pmd(vma, addr, pmd);
1033 }
1034 
1035 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1036 		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1037 {
1038 	unsigned long pfn = pmd_pfn(*pmd);
1039 	struct mm_struct *mm = vma->vm_mm;
1040 	struct page *page;
1041 	int ret;
1042 
1043 	assert_spin_locked(pmd_lockptr(mm, pmd));
1044 
1045 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
1046 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
1047 			 (FOLL_PIN | FOLL_GET)))
1048 		return NULL;
1049 
1050 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
1051 		return NULL;
1052 
1053 	if (pmd_present(*pmd) && pmd_devmap(*pmd))
1054 		/* pass */;
1055 	else
1056 		return NULL;
1057 
1058 	if (flags & FOLL_TOUCH)
1059 		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1060 
1061 	/*
1062 	 * device mapped pages can only be returned if the
1063 	 * caller will manage the page reference count.
1064 	 */
1065 	if (!(flags & (FOLL_GET | FOLL_PIN)))
1066 		return ERR_PTR(-EEXIST);
1067 
1068 	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1069 	*pgmap = get_dev_pagemap(pfn, *pgmap);
1070 	if (!*pgmap)
1071 		return ERR_PTR(-EFAULT);
1072 	page = pfn_to_page(pfn);
1073 	ret = try_grab_page(page, flags);
1074 	if (ret)
1075 		page = ERR_PTR(ret);
1076 
1077 	return page;
1078 }
1079 
1080 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1081 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1082 		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1083 {
1084 	spinlock_t *dst_ptl, *src_ptl;
1085 	struct page *src_page;
1086 	pmd_t pmd;
1087 	pgtable_t pgtable = NULL;
1088 	int ret = -ENOMEM;
1089 
1090 	/* Skip if can be re-fill on fault */
1091 	if (!vma_is_anonymous(dst_vma))
1092 		return 0;
1093 
1094 	pgtable = pte_alloc_one(dst_mm);
1095 	if (unlikely(!pgtable))
1096 		goto out;
1097 
1098 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
1099 	src_ptl = pmd_lockptr(src_mm, src_pmd);
1100 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1101 
1102 	ret = -EAGAIN;
1103 	pmd = *src_pmd;
1104 
1105 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1106 	if (unlikely(is_swap_pmd(pmd))) {
1107 		swp_entry_t entry = pmd_to_swp_entry(pmd);
1108 
1109 		VM_BUG_ON(!is_pmd_migration_entry(pmd));
1110 		if (!is_readable_migration_entry(entry)) {
1111 			entry = make_readable_migration_entry(
1112 							swp_offset(entry));
1113 			pmd = swp_entry_to_pmd(entry);
1114 			if (pmd_swp_soft_dirty(*src_pmd))
1115 				pmd = pmd_swp_mksoft_dirty(pmd);
1116 			if (pmd_swp_uffd_wp(*src_pmd))
1117 				pmd = pmd_swp_mkuffd_wp(pmd);
1118 			set_pmd_at(src_mm, addr, src_pmd, pmd);
1119 		}
1120 		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1121 		mm_inc_nr_ptes(dst_mm);
1122 		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1123 		if (!userfaultfd_wp(dst_vma))
1124 			pmd = pmd_swp_clear_uffd_wp(pmd);
1125 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1126 		ret = 0;
1127 		goto out_unlock;
1128 	}
1129 #endif
1130 
1131 	if (unlikely(!pmd_trans_huge(pmd))) {
1132 		pte_free(dst_mm, pgtable);
1133 		goto out_unlock;
1134 	}
1135 	/*
1136 	 * When page table lock is held, the huge zero pmd should not be
1137 	 * under splitting since we don't split the page itself, only pmd to
1138 	 * a page table.
1139 	 */
1140 	if (is_huge_zero_pmd(pmd)) {
1141 		/*
1142 		 * get_huge_zero_page() will never allocate a new page here,
1143 		 * since we already have a zero page to copy. It just takes a
1144 		 * reference.
1145 		 */
1146 		mm_get_huge_zero_page(dst_mm);
1147 		goto out_zero_page;
1148 	}
1149 
1150 	src_page = pmd_page(pmd);
1151 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1152 
1153 	get_page(src_page);
1154 	if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
1155 		/* Page maybe pinned: split and retry the fault on PTEs. */
1156 		put_page(src_page);
1157 		pte_free(dst_mm, pgtable);
1158 		spin_unlock(src_ptl);
1159 		spin_unlock(dst_ptl);
1160 		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
1161 		return -EAGAIN;
1162 	}
1163 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1164 out_zero_page:
1165 	mm_inc_nr_ptes(dst_mm);
1166 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1167 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
1168 	if (!userfaultfd_wp(dst_vma))
1169 		pmd = pmd_clear_uffd_wp(pmd);
1170 	pmd = pmd_mkold(pmd_wrprotect(pmd));
1171 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1172 
1173 	ret = 0;
1174 out_unlock:
1175 	spin_unlock(src_ptl);
1176 	spin_unlock(dst_ptl);
1177 out:
1178 	return ret;
1179 }
1180 
1181 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1182 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1183 		      pud_t *pud, bool write)
1184 {
1185 	pud_t _pud;
1186 
1187 	_pud = pud_mkyoung(*pud);
1188 	if (write)
1189 		_pud = pud_mkdirty(_pud);
1190 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1191 				  pud, _pud, write))
1192 		update_mmu_cache_pud(vma, addr, pud);
1193 }
1194 
1195 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1196 		pud_t *pud, int flags, struct dev_pagemap **pgmap)
1197 {
1198 	unsigned long pfn = pud_pfn(*pud);
1199 	struct mm_struct *mm = vma->vm_mm;
1200 	struct page *page;
1201 	int ret;
1202 
1203 	assert_spin_locked(pud_lockptr(mm, pud));
1204 
1205 	if (flags & FOLL_WRITE && !pud_write(*pud))
1206 		return NULL;
1207 
1208 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
1209 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
1210 			 (FOLL_PIN | FOLL_GET)))
1211 		return NULL;
1212 
1213 	if (pud_present(*pud) && pud_devmap(*pud))
1214 		/* pass */;
1215 	else
1216 		return NULL;
1217 
1218 	if (flags & FOLL_TOUCH)
1219 		touch_pud(vma, addr, pud, flags & FOLL_WRITE);
1220 
1221 	/*
1222 	 * device mapped pages can only be returned if the
1223 	 * caller will manage the page reference count.
1224 	 *
1225 	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
1226 	 */
1227 	if (!(flags & (FOLL_GET | FOLL_PIN)))
1228 		return ERR_PTR(-EEXIST);
1229 
1230 	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1231 	*pgmap = get_dev_pagemap(pfn, *pgmap);
1232 	if (!*pgmap)
1233 		return ERR_PTR(-EFAULT);
1234 	page = pfn_to_page(pfn);
1235 
1236 	ret = try_grab_page(page, flags);
1237 	if (ret)
1238 		page = ERR_PTR(ret);
1239 
1240 	return page;
1241 }
1242 
1243 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1244 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1245 		  struct vm_area_struct *vma)
1246 {
1247 	spinlock_t *dst_ptl, *src_ptl;
1248 	pud_t pud;
1249 	int ret;
1250 
1251 	dst_ptl = pud_lock(dst_mm, dst_pud);
1252 	src_ptl = pud_lockptr(src_mm, src_pud);
1253 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1254 
1255 	ret = -EAGAIN;
1256 	pud = *src_pud;
1257 	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1258 		goto out_unlock;
1259 
1260 	/*
1261 	 * When page table lock is held, the huge zero pud should not be
1262 	 * under splitting since we don't split the page itself, only pud to
1263 	 * a page table.
1264 	 */
1265 	if (is_huge_zero_pud(pud)) {
1266 		/* No huge zero pud yet */
1267 	}
1268 
1269 	/*
1270 	 * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
1271 	 * and split if duplicating fails.
1272 	 */
1273 	pudp_set_wrprotect(src_mm, addr, src_pud);
1274 	pud = pud_mkold(pud_wrprotect(pud));
1275 	set_pud_at(dst_mm, addr, dst_pud, pud);
1276 
1277 	ret = 0;
1278 out_unlock:
1279 	spin_unlock(src_ptl);
1280 	spin_unlock(dst_ptl);
1281 	return ret;
1282 }
1283 
1284 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1285 {
1286 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1287 
1288 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1289 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1290 		goto unlock;
1291 
1292 	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1293 unlock:
1294 	spin_unlock(vmf->ptl);
1295 }
1296 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1297 
1298 void huge_pmd_set_accessed(struct vm_fault *vmf)
1299 {
1300 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1301 
1302 	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1303 	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1304 		goto unlock;
1305 
1306 	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1307 
1308 unlock:
1309 	spin_unlock(vmf->ptl);
1310 }
1311 
1312 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1313 {
1314 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1315 	struct vm_area_struct *vma = vmf->vma;
1316 	struct folio *folio;
1317 	struct page *page;
1318 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1319 	pmd_t orig_pmd = vmf->orig_pmd;
1320 
1321 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1322 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
1323 
1324 	if (is_huge_zero_pmd(orig_pmd))
1325 		goto fallback;
1326 
1327 	spin_lock(vmf->ptl);
1328 
1329 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1330 		spin_unlock(vmf->ptl);
1331 		return 0;
1332 	}
1333 
1334 	page = pmd_page(orig_pmd);
1335 	folio = page_folio(page);
1336 	VM_BUG_ON_PAGE(!PageHead(page), page);
1337 
1338 	/* Early check when only holding the PT lock. */
1339 	if (PageAnonExclusive(page))
1340 		goto reuse;
1341 
1342 	if (!folio_trylock(folio)) {
1343 		folio_get(folio);
1344 		spin_unlock(vmf->ptl);
1345 		folio_lock(folio);
1346 		spin_lock(vmf->ptl);
1347 		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1348 			spin_unlock(vmf->ptl);
1349 			folio_unlock(folio);
1350 			folio_put(folio);
1351 			return 0;
1352 		}
1353 		folio_put(folio);
1354 	}
1355 
1356 	/* Recheck after temporarily dropping the PT lock. */
1357 	if (PageAnonExclusive(page)) {
1358 		folio_unlock(folio);
1359 		goto reuse;
1360 	}
1361 
1362 	/*
1363 	 * See do_wp_page(): we can only reuse the folio exclusively if
1364 	 * there are no additional references. Note that we always drain
1365 	 * the LRU pagevecs immediately after adding a THP.
1366 	 */
1367 	if (folio_ref_count(folio) >
1368 			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1369 		goto unlock_fallback;
1370 	if (folio_test_swapcache(folio))
1371 		folio_free_swap(folio);
1372 	if (folio_ref_count(folio) == 1) {
1373 		pmd_t entry;
1374 
1375 		page_move_anon_rmap(page, vma);
1376 		folio_unlock(folio);
1377 reuse:
1378 		if (unlikely(unshare)) {
1379 			spin_unlock(vmf->ptl);
1380 			return 0;
1381 		}
1382 		entry = pmd_mkyoung(orig_pmd);
1383 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1384 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1385 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1386 		spin_unlock(vmf->ptl);
1387 		return 0;
1388 	}
1389 
1390 unlock_fallback:
1391 	folio_unlock(folio);
1392 	spin_unlock(vmf->ptl);
1393 fallback:
1394 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1395 	return VM_FAULT_FALLBACK;
1396 }
1397 
1398 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1399 					   unsigned long addr, pmd_t pmd)
1400 {
1401 	struct page *page;
1402 
1403 	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1404 		return false;
1405 
1406 	/* Don't touch entries that are not even readable (NUMA hinting). */
1407 	if (pmd_protnone(pmd))
1408 		return false;
1409 
1410 	/* Do we need write faults for softdirty tracking? */
1411 	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1412 		return false;
1413 
1414 	/* Do we need write faults for uffd-wp tracking? */
1415 	if (userfaultfd_huge_pmd_wp(vma, pmd))
1416 		return false;
1417 
1418 	if (!(vma->vm_flags & VM_SHARED)) {
1419 		/* See can_change_pte_writable(). */
1420 		page = vm_normal_page_pmd(vma, addr, pmd);
1421 		return page && PageAnon(page) && PageAnonExclusive(page);
1422 	}
1423 
1424 	/* See can_change_pte_writable(). */
1425 	return pmd_dirty(pmd);
1426 }
1427 
1428 /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
1429 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1430 					struct vm_area_struct *vma,
1431 					unsigned int flags)
1432 {
1433 	/* If the pmd is writable, we can write to the page. */
1434 	if (pmd_write(pmd))
1435 		return true;
1436 
1437 	/* Maybe FOLL_FORCE is set to override it? */
1438 	if (!(flags & FOLL_FORCE))
1439 		return false;
1440 
1441 	/* But FOLL_FORCE has no effect on shared mappings */
1442 	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
1443 		return false;
1444 
1445 	/* ... or read-only private ones */
1446 	if (!(vma->vm_flags & VM_MAYWRITE))
1447 		return false;
1448 
1449 	/* ... or already writable ones that just need to take a write fault */
1450 	if (vma->vm_flags & VM_WRITE)
1451 		return false;
1452 
1453 	/*
1454 	 * See can_change_pte_writable(): we broke COW and could map the page
1455 	 * writable if we have an exclusive anonymous page ...
1456 	 */
1457 	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
1458 		return false;
1459 
1460 	/* ... and a write-fault isn't required for other reasons. */
1461 	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1462 		return false;
1463 	return !userfaultfd_huge_pmd_wp(vma, pmd);
1464 }
1465 
1466 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1467 				   unsigned long addr,
1468 				   pmd_t *pmd,
1469 				   unsigned int flags)
1470 {
1471 	struct mm_struct *mm = vma->vm_mm;
1472 	struct page *page;
1473 	int ret;
1474 
1475 	assert_spin_locked(pmd_lockptr(mm, pmd));
1476 
1477 	page = pmd_page(*pmd);
1478 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1479 
1480 	if ((flags & FOLL_WRITE) &&
1481 	    !can_follow_write_pmd(*pmd, page, vma, flags))
1482 		return NULL;
1483 
1484 	/* Avoid dumping huge zero page */
1485 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1486 		return ERR_PTR(-EFAULT);
1487 
1488 	/* Full NUMA hinting faults to serialise migration in fault paths */
1489 	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags))
1490 		return NULL;
1491 
1492 	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
1493 		return ERR_PTR(-EMLINK);
1494 
1495 	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1496 			!PageAnonExclusive(page), page);
1497 
1498 	ret = try_grab_page(page, flags);
1499 	if (ret)
1500 		return ERR_PTR(ret);
1501 
1502 	if (flags & FOLL_TOUCH)
1503 		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1504 
1505 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1506 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1507 
1508 	return page;
1509 }
1510 
1511 /* NUMA hinting page fault entry point for trans huge pmds */
1512 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1513 {
1514 	struct vm_area_struct *vma = vmf->vma;
1515 	pmd_t oldpmd = vmf->orig_pmd;
1516 	pmd_t pmd;
1517 	struct page *page;
1518 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1519 	int page_nid = NUMA_NO_NODE;
1520 	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
1521 	bool migrated = false, writable = false;
1522 	int flags = 0;
1523 
1524 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1525 	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1526 		spin_unlock(vmf->ptl);
1527 		goto out;
1528 	}
1529 
1530 	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1531 
1532 	/*
1533 	 * Detect now whether the PMD could be writable; this information
1534 	 * is only valid while holding the PT lock.
1535 	 */
1536 	writable = pmd_write(pmd);
1537 	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1538 	    can_change_pmd_writable(vma, vmf->address, pmd))
1539 		writable = true;
1540 
1541 	page = vm_normal_page_pmd(vma, haddr, pmd);
1542 	if (!page)
1543 		goto out_map;
1544 
1545 	/* See similar comment in do_numa_page for explanation */
1546 	if (!writable)
1547 		flags |= TNF_NO_GROUP;
1548 
1549 	page_nid = page_to_nid(page);
1550 	/*
1551 	 * For memory tiering mode, cpupid of slow memory page is used
1552 	 * to record page access time.  So use default value.
1553 	 */
1554 	if (node_is_toptier(page_nid))
1555 		last_cpupid = page_cpupid_last(page);
1556 	target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
1557 				       &flags);
1558 
1559 	if (target_nid == NUMA_NO_NODE) {
1560 		put_page(page);
1561 		goto out_map;
1562 	}
1563 
1564 	spin_unlock(vmf->ptl);
1565 	writable = false;
1566 
1567 	migrated = migrate_misplaced_page(page, vma, target_nid);
1568 	if (migrated) {
1569 		flags |= TNF_MIGRATED;
1570 		page_nid = target_nid;
1571 	} else {
1572 		flags |= TNF_MIGRATE_FAIL;
1573 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1574 		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1575 			spin_unlock(vmf->ptl);
1576 			goto out;
1577 		}
1578 		goto out_map;
1579 	}
1580 
1581 out:
1582 	if (page_nid != NUMA_NO_NODE)
1583 		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
1584 				flags);
1585 
1586 	return 0;
1587 
1588 out_map:
1589 	/* Restore the PMD */
1590 	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1591 	pmd = pmd_mkyoung(pmd);
1592 	if (writable)
1593 		pmd = pmd_mkwrite(pmd);
1594 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1595 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1596 	spin_unlock(vmf->ptl);
1597 	goto out;
1598 }
1599 
1600 /*
1601  * Return true if we do MADV_FREE successfully on entire pmd page.
1602  * Otherwise, return false.
1603  */
1604 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1605 		pmd_t *pmd, unsigned long addr, unsigned long next)
1606 {
1607 	spinlock_t *ptl;
1608 	pmd_t orig_pmd;
1609 	struct folio *folio;
1610 	struct mm_struct *mm = tlb->mm;
1611 	bool ret = false;
1612 
1613 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1614 
1615 	ptl = pmd_trans_huge_lock(pmd, vma);
1616 	if (!ptl)
1617 		goto out_unlocked;
1618 
1619 	orig_pmd = *pmd;
1620 	if (is_huge_zero_pmd(orig_pmd))
1621 		goto out;
1622 
1623 	if (unlikely(!pmd_present(orig_pmd))) {
1624 		VM_BUG_ON(thp_migration_supported() &&
1625 				  !is_pmd_migration_entry(orig_pmd));
1626 		goto out;
1627 	}
1628 
1629 	folio = pfn_folio(pmd_pfn(orig_pmd));
1630 	/*
1631 	 * If other processes are mapping this folio, we couldn't discard
1632 	 * the folio unless they all do MADV_FREE so let's skip the folio.
1633 	 */
1634 	if (folio_mapcount(folio) != 1)
1635 		goto out;
1636 
1637 	if (!folio_trylock(folio))
1638 		goto out;
1639 
1640 	/*
1641 	 * If user want to discard part-pages of THP, split it so MADV_FREE
1642 	 * will deactivate only them.
1643 	 */
1644 	if (next - addr != HPAGE_PMD_SIZE) {
1645 		folio_get(folio);
1646 		spin_unlock(ptl);
1647 		split_folio(folio);
1648 		folio_unlock(folio);
1649 		folio_put(folio);
1650 		goto out_unlocked;
1651 	}
1652 
1653 	if (folio_test_dirty(folio))
1654 		folio_clear_dirty(folio);
1655 	folio_unlock(folio);
1656 
1657 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1658 		pmdp_invalidate(vma, addr, pmd);
1659 		orig_pmd = pmd_mkold(orig_pmd);
1660 		orig_pmd = pmd_mkclean(orig_pmd);
1661 
1662 		set_pmd_at(mm, addr, pmd, orig_pmd);
1663 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1664 	}
1665 
1666 	folio_mark_lazyfree(folio);
1667 	ret = true;
1668 out:
1669 	spin_unlock(ptl);
1670 out_unlocked:
1671 	return ret;
1672 }
1673 
1674 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1675 {
1676 	pgtable_t pgtable;
1677 
1678 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1679 	pte_free(mm, pgtable);
1680 	mm_dec_nr_ptes(mm);
1681 }
1682 
1683 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1684 		 pmd_t *pmd, unsigned long addr)
1685 {
1686 	pmd_t orig_pmd;
1687 	spinlock_t *ptl;
1688 
1689 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1690 
1691 	ptl = __pmd_trans_huge_lock(pmd, vma);
1692 	if (!ptl)
1693 		return 0;
1694 	/*
1695 	 * For architectures like ppc64 we look at deposited pgtable
1696 	 * when calling pmdp_huge_get_and_clear. So do the
1697 	 * pgtable_trans_huge_withdraw after finishing pmdp related
1698 	 * operations.
1699 	 */
1700 	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1701 						tlb->fullmm);
1702 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1703 	if (vma_is_special_huge(vma)) {
1704 		if (arch_needs_pgtable_deposit())
1705 			zap_deposited_table(tlb->mm, pmd);
1706 		spin_unlock(ptl);
1707 	} else if (is_huge_zero_pmd(orig_pmd)) {
1708 		zap_deposited_table(tlb->mm, pmd);
1709 		spin_unlock(ptl);
1710 	} else {
1711 		struct page *page = NULL;
1712 		int flush_needed = 1;
1713 
1714 		if (pmd_present(orig_pmd)) {
1715 			page = pmd_page(orig_pmd);
1716 			page_remove_rmap(page, vma, true);
1717 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1718 			VM_BUG_ON_PAGE(!PageHead(page), page);
1719 		} else if (thp_migration_supported()) {
1720 			swp_entry_t entry;
1721 
1722 			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1723 			entry = pmd_to_swp_entry(orig_pmd);
1724 			page = pfn_swap_entry_to_page(entry);
1725 			flush_needed = 0;
1726 		} else
1727 			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1728 
1729 		if (PageAnon(page)) {
1730 			zap_deposited_table(tlb->mm, pmd);
1731 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1732 		} else {
1733 			if (arch_needs_pgtable_deposit())
1734 				zap_deposited_table(tlb->mm, pmd);
1735 			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
1736 		}
1737 
1738 		spin_unlock(ptl);
1739 		if (flush_needed)
1740 			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1741 	}
1742 	return 1;
1743 }
1744 
1745 #ifndef pmd_move_must_withdraw
1746 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1747 					 spinlock_t *old_pmd_ptl,
1748 					 struct vm_area_struct *vma)
1749 {
1750 	/*
1751 	 * With split pmd lock we also need to move preallocated
1752 	 * PTE page table if new_pmd is on different PMD page table.
1753 	 *
1754 	 * We also don't deposit and withdraw tables for file pages.
1755 	 */
1756 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1757 }
1758 #endif
1759 
1760 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1761 {
1762 #ifdef CONFIG_MEM_SOFT_DIRTY
1763 	if (unlikely(is_pmd_migration_entry(pmd)))
1764 		pmd = pmd_swp_mksoft_dirty(pmd);
1765 	else if (pmd_present(pmd))
1766 		pmd = pmd_mksoft_dirty(pmd);
1767 #endif
1768 	return pmd;
1769 }
1770 
1771 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1772 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
1773 {
1774 	spinlock_t *old_ptl, *new_ptl;
1775 	pmd_t pmd;
1776 	struct mm_struct *mm = vma->vm_mm;
1777 	bool force_flush = false;
1778 
1779 	/*
1780 	 * The destination pmd shouldn't be established, free_pgtables()
1781 	 * should have release it.
1782 	 */
1783 	if (WARN_ON(!pmd_none(*new_pmd))) {
1784 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
1785 		return false;
1786 	}
1787 
1788 	/*
1789 	 * We don't have to worry about the ordering of src and dst
1790 	 * ptlocks because exclusive mmap_lock prevents deadlock.
1791 	 */
1792 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1793 	if (old_ptl) {
1794 		new_ptl = pmd_lockptr(mm, new_pmd);
1795 		if (new_ptl != old_ptl)
1796 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1797 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1798 		if (pmd_present(pmd))
1799 			force_flush = true;
1800 		VM_BUG_ON(!pmd_none(*new_pmd));
1801 
1802 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1803 			pgtable_t pgtable;
1804 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1805 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1806 		}
1807 		pmd = move_soft_dirty_pmd(pmd);
1808 		set_pmd_at(mm, new_addr, new_pmd, pmd);
1809 		if (force_flush)
1810 			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1811 		if (new_ptl != old_ptl)
1812 			spin_unlock(new_ptl);
1813 		spin_unlock(old_ptl);
1814 		return true;
1815 	}
1816 	return false;
1817 }
1818 
1819 /*
1820  * Returns
1821  *  - 0 if PMD could not be locked
1822  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1823  *      or if prot_numa but THP migration is not supported
1824  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1825  */
1826 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1827 		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
1828 		    unsigned long cp_flags)
1829 {
1830 	struct mm_struct *mm = vma->vm_mm;
1831 	spinlock_t *ptl;
1832 	pmd_t oldpmd, entry;
1833 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
1834 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
1835 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
1836 	int ret = 1;
1837 
1838 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1839 
1840 	if (prot_numa && !thp_migration_supported())
1841 		return 1;
1842 
1843 	ptl = __pmd_trans_huge_lock(pmd, vma);
1844 	if (!ptl)
1845 		return 0;
1846 
1847 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1848 	if (is_swap_pmd(*pmd)) {
1849 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
1850 		struct page *page = pfn_swap_entry_to_page(entry);
1851 
1852 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1853 		if (is_writable_migration_entry(entry)) {
1854 			pmd_t newpmd;
1855 			/*
1856 			 * A protection check is difficult so
1857 			 * just be safe and disable write
1858 			 */
1859 			if (PageAnon(page))
1860 				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
1861 			else
1862 				entry = make_readable_migration_entry(swp_offset(entry));
1863 			newpmd = swp_entry_to_pmd(entry);
1864 			if (pmd_swp_soft_dirty(*pmd))
1865 				newpmd = pmd_swp_mksoft_dirty(newpmd);
1866 			if (pmd_swp_uffd_wp(*pmd))
1867 				newpmd = pmd_swp_mkuffd_wp(newpmd);
1868 			set_pmd_at(mm, addr, pmd, newpmd);
1869 		}
1870 		goto unlock;
1871 	}
1872 #endif
1873 
1874 	if (prot_numa) {
1875 		struct page *page;
1876 		bool toptier;
1877 		/*
1878 		 * Avoid trapping faults against the zero page. The read-only
1879 		 * data is likely to be read-cached on the local CPU and
1880 		 * local/remote hits to the zero page are not interesting.
1881 		 */
1882 		if (is_huge_zero_pmd(*pmd))
1883 			goto unlock;
1884 
1885 		if (pmd_protnone(*pmd))
1886 			goto unlock;
1887 
1888 		page = pmd_page(*pmd);
1889 		toptier = node_is_toptier(page_to_nid(page));
1890 		/*
1891 		 * Skip scanning top tier node if normal numa
1892 		 * balancing is disabled
1893 		 */
1894 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
1895 		    toptier)
1896 			goto unlock;
1897 
1898 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
1899 		    !toptier)
1900 			xchg_page_access_time(page, jiffies_to_msecs(jiffies));
1901 	}
1902 	/*
1903 	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
1904 	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1905 	 * which is also under mmap_read_lock(mm):
1906 	 *
1907 	 *	CPU0:				CPU1:
1908 	 *				change_huge_pmd(prot_numa=1)
1909 	 *				 pmdp_huge_get_and_clear_notify()
1910 	 * madvise_dontneed()
1911 	 *  zap_pmd_range()
1912 	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
1913 	 *   // skip the pmd
1914 	 *				 set_pmd_at();
1915 	 *				 // pmd is re-established
1916 	 *
1917 	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
1918 	 * which may break userspace.
1919 	 *
1920 	 * pmdp_invalidate_ad() is required to make sure we don't miss
1921 	 * dirty/young flags set by hardware.
1922 	 */
1923 	oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
1924 
1925 	entry = pmd_modify(oldpmd, newprot);
1926 	if (uffd_wp)
1927 		entry = pmd_mkuffd_wp(entry);
1928 	else if (uffd_wp_resolve)
1929 		/*
1930 		 * Leave the write bit to be handled by PF interrupt
1931 		 * handler, then things like COW could be properly
1932 		 * handled.
1933 		 */
1934 		entry = pmd_clear_uffd_wp(entry);
1935 
1936 	/* See change_pte_range(). */
1937 	if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
1938 	    can_change_pmd_writable(vma, addr, entry))
1939 		entry = pmd_mkwrite(entry);
1940 
1941 	ret = HPAGE_PMD_NR;
1942 	set_pmd_at(mm, addr, pmd, entry);
1943 
1944 	if (huge_pmd_needs_flush(oldpmd, entry))
1945 		tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
1946 unlock:
1947 	spin_unlock(ptl);
1948 	return ret;
1949 }
1950 
1951 /*
1952  * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1953  *
1954  * Note that if it returns page table lock pointer, this routine returns without
1955  * unlocking page table lock. So callers must unlock it.
1956  */
1957 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1958 {
1959 	spinlock_t *ptl;
1960 	ptl = pmd_lock(vma->vm_mm, pmd);
1961 	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1962 			pmd_devmap(*pmd)))
1963 		return ptl;
1964 	spin_unlock(ptl);
1965 	return NULL;
1966 }
1967 
1968 /*
1969  * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
1970  *
1971  * Note that if it returns page table lock pointer, this routine returns without
1972  * unlocking page table lock. So callers must unlock it.
1973  */
1974 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1975 {
1976 	spinlock_t *ptl;
1977 
1978 	ptl = pud_lock(vma->vm_mm, pud);
1979 	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1980 		return ptl;
1981 	spin_unlock(ptl);
1982 	return NULL;
1983 }
1984 
1985 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1986 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1987 		 pud_t *pud, unsigned long addr)
1988 {
1989 	spinlock_t *ptl;
1990 
1991 	ptl = __pud_trans_huge_lock(pud, vma);
1992 	if (!ptl)
1993 		return 0;
1994 
1995 	pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
1996 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
1997 	if (vma_is_special_huge(vma)) {
1998 		spin_unlock(ptl);
1999 		/* No zero page support yet */
2000 	} else {
2001 		/* No support for anonymous PUD pages yet */
2002 		BUG();
2003 	}
2004 	return 1;
2005 }
2006 
2007 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2008 		unsigned long haddr)
2009 {
2010 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2011 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2012 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2013 	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2014 
2015 	count_vm_event(THP_SPLIT_PUD);
2016 
2017 	pudp_huge_clear_flush_notify(vma, haddr, pud);
2018 }
2019 
2020 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2021 		unsigned long address)
2022 {
2023 	spinlock_t *ptl;
2024 	struct mmu_notifier_range range;
2025 
2026 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2027 				address & HPAGE_PUD_MASK,
2028 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2029 	mmu_notifier_invalidate_range_start(&range);
2030 	ptl = pud_lock(vma->vm_mm, pud);
2031 	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2032 		goto out;
2033 	__split_huge_pud_locked(vma, pud, range.start);
2034 
2035 out:
2036 	spin_unlock(ptl);
2037 	/*
2038 	 * No need to double call mmu_notifier->invalidate_range() callback as
2039 	 * the above pudp_huge_clear_flush_notify() did already call it.
2040 	 */
2041 	mmu_notifier_invalidate_range_only_end(&range);
2042 }
2043 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2044 
2045 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2046 		unsigned long haddr, pmd_t *pmd)
2047 {
2048 	struct mm_struct *mm = vma->vm_mm;
2049 	pgtable_t pgtable;
2050 	pmd_t _pmd;
2051 	int i;
2052 
2053 	/*
2054 	 * Leave pmd empty until pte is filled note that it is fine to delay
2055 	 * notification until mmu_notifier_invalidate_range_end() as we are
2056 	 * replacing a zero pmd write protected page with a zero pte write
2057 	 * protected page.
2058 	 *
2059 	 * See Documentation/mm/mmu_notifier.rst
2060 	 */
2061 	pmdp_huge_clear_flush(vma, haddr, pmd);
2062 
2063 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2064 	pmd_populate(mm, &_pmd, pgtable);
2065 
2066 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2067 		pte_t *pte, entry;
2068 		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2069 		entry = pte_mkspecial(entry);
2070 		pte = pte_offset_map(&_pmd, haddr);
2071 		VM_BUG_ON(!pte_none(*pte));
2072 		set_pte_at(mm, haddr, pte, entry);
2073 		pte_unmap(pte);
2074 	}
2075 	smp_wmb(); /* make pte visible before pmd */
2076 	pmd_populate(mm, pmd, pgtable);
2077 }
2078 
2079 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2080 		unsigned long haddr, bool freeze)
2081 {
2082 	struct mm_struct *mm = vma->vm_mm;
2083 	struct page *page;
2084 	pgtable_t pgtable;
2085 	pmd_t old_pmd, _pmd;
2086 	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2087 	bool anon_exclusive = false, dirty = false;
2088 	unsigned long addr;
2089 	int i;
2090 
2091 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2092 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2093 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2094 	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2095 				&& !pmd_devmap(*pmd));
2096 
2097 	count_vm_event(THP_SPLIT_PMD);
2098 
2099 	if (!vma_is_anonymous(vma)) {
2100 		old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2101 		/*
2102 		 * We are going to unmap this huge page. So
2103 		 * just go ahead and zap it
2104 		 */
2105 		if (arch_needs_pgtable_deposit())
2106 			zap_deposited_table(mm, pmd);
2107 		if (vma_is_special_huge(vma))
2108 			return;
2109 		if (unlikely(is_pmd_migration_entry(old_pmd))) {
2110 			swp_entry_t entry;
2111 
2112 			entry = pmd_to_swp_entry(old_pmd);
2113 			page = pfn_swap_entry_to_page(entry);
2114 		} else {
2115 			page = pmd_page(old_pmd);
2116 			if (!PageDirty(page) && pmd_dirty(old_pmd))
2117 				set_page_dirty(page);
2118 			if (!PageReferenced(page) && pmd_young(old_pmd))
2119 				SetPageReferenced(page);
2120 			page_remove_rmap(page, vma, true);
2121 			put_page(page);
2122 		}
2123 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2124 		return;
2125 	}
2126 
2127 	if (is_huge_zero_pmd(*pmd)) {
2128 		/*
2129 		 * FIXME: Do we want to invalidate secondary mmu by calling
2130 		 * mmu_notifier_invalidate_range() see comments below inside
2131 		 * __split_huge_pmd() ?
2132 		 *
2133 		 * We are going from a zero huge page write protected to zero
2134 		 * small page also write protected so it does not seems useful
2135 		 * to invalidate secondary mmu at this time.
2136 		 */
2137 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
2138 	}
2139 
2140 	/*
2141 	 * Up to this point the pmd is present and huge and userland has the
2142 	 * whole access to the hugepage during the split (which happens in
2143 	 * place). If we overwrite the pmd with the not-huge version pointing
2144 	 * to the pte here (which of course we could if all CPUs were bug
2145 	 * free), userland could trigger a small page size TLB miss on the
2146 	 * small sized TLB while the hugepage TLB entry is still established in
2147 	 * the huge TLB. Some CPU doesn't like that.
2148 	 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2149 	 * 383 on page 105. Intel should be safe but is also warns that it's
2150 	 * only safe if the permission and cache attributes of the two entries
2151 	 * loaded in the two TLB is identical (which should be the case here).
2152 	 * But it is generally safer to never allow small and huge TLB entries
2153 	 * for the same virtual address to be loaded simultaneously. So instead
2154 	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2155 	 * current pmd notpresent (atomically because here the pmd_trans_huge
2156 	 * must remain set at all times on the pmd until the split is complete
2157 	 * for this pmd), then we flush the SMP TLB and finally we write the
2158 	 * non-huge version of the pmd entry with pmd_populate.
2159 	 */
2160 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
2161 
2162 	pmd_migration = is_pmd_migration_entry(old_pmd);
2163 	if (unlikely(pmd_migration)) {
2164 		swp_entry_t entry;
2165 
2166 		entry = pmd_to_swp_entry(old_pmd);
2167 		page = pfn_swap_entry_to_page(entry);
2168 		write = is_writable_migration_entry(entry);
2169 		if (PageAnon(page))
2170 			anon_exclusive = is_readable_exclusive_migration_entry(entry);
2171 		young = is_migration_entry_young(entry);
2172 		dirty = is_migration_entry_dirty(entry);
2173 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
2174 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
2175 	} else {
2176 		page = pmd_page(old_pmd);
2177 		if (pmd_dirty(old_pmd)) {
2178 			dirty = true;
2179 			SetPageDirty(page);
2180 		}
2181 		write = pmd_write(old_pmd);
2182 		young = pmd_young(old_pmd);
2183 		soft_dirty = pmd_soft_dirty(old_pmd);
2184 		uffd_wp = pmd_uffd_wp(old_pmd);
2185 
2186 		VM_BUG_ON_PAGE(!page_count(page), page);
2187 
2188 		/*
2189 		 * Without "freeze", we'll simply split the PMD, propagating the
2190 		 * PageAnonExclusive() flag for each PTE by setting it for
2191 		 * each subpage -- no need to (temporarily) clear.
2192 		 *
2193 		 * With "freeze" we want to replace mapped pages by
2194 		 * migration entries right away. This is only possible if we
2195 		 * managed to clear PageAnonExclusive() -- see
2196 		 * set_pmd_migration_entry().
2197 		 *
2198 		 * In case we cannot clear PageAnonExclusive(), split the PMD
2199 		 * only and let try_to_migrate_one() fail later.
2200 		 *
2201 		 * See page_try_share_anon_rmap(): invalidate PMD first.
2202 		 */
2203 		anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
2204 		if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
2205 			freeze = false;
2206 		if (!freeze)
2207 			page_ref_add(page, HPAGE_PMD_NR - 1);
2208 	}
2209 
2210 	/*
2211 	 * Withdraw the table only after we mark the pmd entry invalid.
2212 	 * This's critical for some architectures (Power).
2213 	 */
2214 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2215 	pmd_populate(mm, &_pmd, pgtable);
2216 
2217 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2218 		pte_t entry, *pte;
2219 		/*
2220 		 * Note that NUMA hinting access restrictions are not
2221 		 * transferred to avoid any possibility of altering
2222 		 * permissions across VMAs.
2223 		 */
2224 		if (freeze || pmd_migration) {
2225 			swp_entry_t swp_entry;
2226 			if (write)
2227 				swp_entry = make_writable_migration_entry(
2228 							page_to_pfn(page + i));
2229 			else if (anon_exclusive)
2230 				swp_entry = make_readable_exclusive_migration_entry(
2231 							page_to_pfn(page + i));
2232 			else
2233 				swp_entry = make_readable_migration_entry(
2234 							page_to_pfn(page + i));
2235 			if (young)
2236 				swp_entry = make_migration_entry_young(swp_entry);
2237 			if (dirty)
2238 				swp_entry = make_migration_entry_dirty(swp_entry);
2239 			entry = swp_entry_to_pte(swp_entry);
2240 			if (soft_dirty)
2241 				entry = pte_swp_mksoft_dirty(entry);
2242 			if (uffd_wp)
2243 				entry = pte_swp_mkuffd_wp(entry);
2244 		} else {
2245 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2246 			entry = maybe_mkwrite(entry, vma);
2247 			if (anon_exclusive)
2248 				SetPageAnonExclusive(page + i);
2249 			if (!young)
2250 				entry = pte_mkold(entry);
2251 			/* NOTE: this may set soft-dirty too on some archs */
2252 			if (dirty)
2253 				entry = pte_mkdirty(entry);
2254 			/*
2255 			 * NOTE: this needs to happen after pte_mkdirty,
2256 			 * because some archs (sparc64, loongarch) could
2257 			 * set hw write bit when mkdirty.
2258 			 */
2259 			if (!write)
2260 				entry = pte_wrprotect(entry);
2261 			if (soft_dirty)
2262 				entry = pte_mksoft_dirty(entry);
2263 			if (uffd_wp)
2264 				entry = pte_mkuffd_wp(entry);
2265 			page_add_anon_rmap(page + i, vma, addr, false);
2266 		}
2267 		pte = pte_offset_map(&_pmd, addr);
2268 		BUG_ON(!pte_none(*pte));
2269 		set_pte_at(mm, addr, pte, entry);
2270 		pte_unmap(pte);
2271 	}
2272 
2273 	if (!pmd_migration)
2274 		page_remove_rmap(page, vma, true);
2275 	if (freeze)
2276 		put_page(page);
2277 
2278 	smp_wmb(); /* make pte visible before pmd */
2279 	pmd_populate(mm, pmd, pgtable);
2280 }
2281 
2282 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2283 		unsigned long address, bool freeze, struct folio *folio)
2284 {
2285 	spinlock_t *ptl;
2286 	struct mmu_notifier_range range;
2287 
2288 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2289 				address & HPAGE_PMD_MASK,
2290 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2291 	mmu_notifier_invalidate_range_start(&range);
2292 	ptl = pmd_lock(vma->vm_mm, pmd);
2293 
2294 	/*
2295 	 * If caller asks to setup a migration entry, we need a folio to check
2296 	 * pmd against. Otherwise we can end up replacing wrong folio.
2297 	 */
2298 	VM_BUG_ON(freeze && !folio);
2299 	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2300 
2301 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2302 	    is_pmd_migration_entry(*pmd)) {
2303 		/*
2304 		 * It's safe to call pmd_page when folio is set because it's
2305 		 * guaranteed that pmd is present.
2306 		 */
2307 		if (folio && folio != page_folio(pmd_page(*pmd)))
2308 			goto out;
2309 		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
2310 	}
2311 
2312 out:
2313 	spin_unlock(ptl);
2314 	/*
2315 	 * No need to double call mmu_notifier->invalidate_range() callback.
2316 	 * They are 3 cases to consider inside __split_huge_pmd_locked():
2317 	 *  1) pmdp_huge_clear_flush_notify() call invalidate_range() obvious
2318 	 *  2) __split_huge_zero_page_pmd() read only zero page and any write
2319 	 *    fault will trigger a flush_notify before pointing to a new page
2320 	 *    (it is fine if the secondary mmu keeps pointing to the old zero
2321 	 *    page in the meantime)
2322 	 *  3) Split a huge pmd into pte pointing to the same page. No need
2323 	 *     to invalidate secondary tlb entry they are all still valid.
2324 	 *     any further changes to individual pte will notify. So no need
2325 	 *     to call mmu_notifier->invalidate_range()
2326 	 */
2327 	mmu_notifier_invalidate_range_only_end(&range);
2328 }
2329 
2330 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2331 		bool freeze, struct folio *folio)
2332 {
2333 	pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2334 
2335 	if (!pmd)
2336 		return;
2337 
2338 	__split_huge_pmd(vma, pmd, address, freeze, folio);
2339 }
2340 
2341 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2342 {
2343 	/*
2344 	 * If the new address isn't hpage aligned and it could previously
2345 	 * contain an hugepage: check if we need to split an huge pmd.
2346 	 */
2347 	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2348 	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2349 			 ALIGN(address, HPAGE_PMD_SIZE)))
2350 		split_huge_pmd_address(vma, address, false, NULL);
2351 }
2352 
2353 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2354 			     unsigned long start,
2355 			     unsigned long end,
2356 			     long adjust_next)
2357 {
2358 	/* Check if we need to split start first. */
2359 	split_huge_pmd_if_needed(vma, start);
2360 
2361 	/* Check if we need to split end next. */
2362 	split_huge_pmd_if_needed(vma, end);
2363 
2364 	/*
2365 	 * If we're also updating the next vma vm_start,
2366 	 * check if we need to split it.
2367 	 */
2368 	if (adjust_next > 0) {
2369 		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2370 		unsigned long nstart = next->vm_start;
2371 		nstart += adjust_next;
2372 		split_huge_pmd_if_needed(next, nstart);
2373 	}
2374 }
2375 
2376 static void unmap_folio(struct folio *folio)
2377 {
2378 	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2379 		TTU_SYNC;
2380 
2381 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2382 
2383 	/*
2384 	 * Anon pages need migration entries to preserve them, but file
2385 	 * pages can simply be left unmapped, then faulted back on demand.
2386 	 * If that is ever changed (perhaps for mlock), update remap_page().
2387 	 */
2388 	if (folio_test_anon(folio))
2389 		try_to_migrate(folio, ttu_flags);
2390 	else
2391 		try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2392 }
2393 
2394 static void remap_page(struct folio *folio, unsigned long nr)
2395 {
2396 	int i = 0;
2397 
2398 	/* If unmap_folio() uses try_to_migrate() on file, remove this check */
2399 	if (!folio_test_anon(folio))
2400 		return;
2401 	for (;;) {
2402 		remove_migration_ptes(folio, folio, true);
2403 		i += folio_nr_pages(folio);
2404 		if (i >= nr)
2405 			break;
2406 		folio = folio_next(folio);
2407 	}
2408 }
2409 
2410 static void lru_add_page_tail(struct page *head, struct page *tail,
2411 		struct lruvec *lruvec, struct list_head *list)
2412 {
2413 	VM_BUG_ON_PAGE(!PageHead(head), head);
2414 	VM_BUG_ON_PAGE(PageCompound(tail), head);
2415 	VM_BUG_ON_PAGE(PageLRU(tail), head);
2416 	lockdep_assert_held(&lruvec->lru_lock);
2417 
2418 	if (list) {
2419 		/* page reclaim is reclaiming a huge page */
2420 		VM_WARN_ON(PageLRU(head));
2421 		get_page(tail);
2422 		list_add_tail(&tail->lru, list);
2423 	} else {
2424 		/* head is still on lru (and we have it frozen) */
2425 		VM_WARN_ON(!PageLRU(head));
2426 		if (PageUnevictable(tail))
2427 			tail->mlock_count = 0;
2428 		else
2429 			list_add_tail(&tail->lru, &head->lru);
2430 		SetPageLRU(tail);
2431 	}
2432 }
2433 
2434 static void __split_huge_page_tail(struct page *head, int tail,
2435 		struct lruvec *lruvec, struct list_head *list)
2436 {
2437 	struct page *page_tail = head + tail;
2438 
2439 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2440 
2441 	/*
2442 	 * Clone page flags before unfreezing refcount.
2443 	 *
2444 	 * After successful get_page_unless_zero() might follow flags change,
2445 	 * for example lock_page() which set PG_waiters.
2446 	 *
2447 	 * Note that for mapped sub-pages of an anonymous THP,
2448 	 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
2449 	 * the migration entry instead from where remap_page() will restore it.
2450 	 * We can still have PG_anon_exclusive set on effectively unmapped and
2451 	 * unreferenced sub-pages of an anonymous THP: we can simply drop
2452 	 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2453 	 */
2454 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2455 	page_tail->flags |= (head->flags &
2456 			((1L << PG_referenced) |
2457 			 (1L << PG_swapbacked) |
2458 			 (1L << PG_swapcache) |
2459 			 (1L << PG_mlocked) |
2460 			 (1L << PG_uptodate) |
2461 			 (1L << PG_active) |
2462 			 (1L << PG_workingset) |
2463 			 (1L << PG_locked) |
2464 			 (1L << PG_unevictable) |
2465 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
2466 			 (1L << PG_arch_2) |
2467 			 (1L << PG_arch_3) |
2468 #endif
2469 			 (1L << PG_dirty) |
2470 			 LRU_GEN_MASK | LRU_REFS_MASK));
2471 
2472 	/* ->mapping in first and second tail page is replaced by other uses */
2473 	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2474 			page_tail);
2475 	page_tail->mapping = head->mapping;
2476 	page_tail->index = head->index + tail;
2477 
2478 	/*
2479 	 * page->private should not be set in tail pages with the exception
2480 	 * of swap cache pages that store the swp_entry_t in tail pages.
2481 	 * Fix up and warn once if private is unexpectedly set.
2482 	 *
2483 	 * What of 32-bit systems, on which folio->_pincount overlays
2484 	 * head[1].private?  No problem: THP_SWAP is not enabled on 32-bit, and
2485 	 * pincount must be 0 for folio_ref_freeze() to have succeeded.
2486 	 */
2487 	if (!folio_test_swapcache(page_folio(head))) {
2488 		VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
2489 		page_tail->private = 0;
2490 	}
2491 
2492 	/* Page flags must be visible before we make the page non-compound. */
2493 	smp_wmb();
2494 
2495 	/*
2496 	 * Clear PageTail before unfreezing page refcount.
2497 	 *
2498 	 * After successful get_page_unless_zero() might follow put_page()
2499 	 * which needs correct compound_head().
2500 	 */
2501 	clear_compound_head(page_tail);
2502 
2503 	/* Finally unfreeze refcount. Additional reference from page cache. */
2504 	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2505 					  PageSwapCache(head)));
2506 
2507 	if (page_is_young(head))
2508 		set_page_young(page_tail);
2509 	if (page_is_idle(head))
2510 		set_page_idle(page_tail);
2511 
2512 	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2513 
2514 	/*
2515 	 * always add to the tail because some iterators expect new
2516 	 * pages to show after the currently processed elements - e.g.
2517 	 * migrate_pages
2518 	 */
2519 	lru_add_page_tail(head, page_tail, lruvec, list);
2520 }
2521 
2522 static void __split_huge_page(struct page *page, struct list_head *list,
2523 		pgoff_t end)
2524 {
2525 	struct folio *folio = page_folio(page);
2526 	struct page *head = &folio->page;
2527 	struct lruvec *lruvec;
2528 	struct address_space *swap_cache = NULL;
2529 	unsigned long offset = 0;
2530 	unsigned int nr = thp_nr_pages(head);
2531 	int i;
2532 
2533 	/* complete memcg works before add pages to LRU */
2534 	split_page_memcg(head, nr);
2535 
2536 	if (PageAnon(head) && PageSwapCache(head)) {
2537 		swp_entry_t entry = { .val = page_private(head) };
2538 
2539 		offset = swp_offset(entry);
2540 		swap_cache = swap_address_space(entry);
2541 		xa_lock(&swap_cache->i_pages);
2542 	}
2543 
2544 	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2545 	lruvec = folio_lruvec_lock(folio);
2546 
2547 	ClearPageHasHWPoisoned(head);
2548 
2549 	for (i = nr - 1; i >= 1; i--) {
2550 		__split_huge_page_tail(head, i, lruvec, list);
2551 		/* Some pages can be beyond EOF: drop them from page cache */
2552 		if (head[i].index >= end) {
2553 			struct folio *tail = page_folio(head + i);
2554 
2555 			if (shmem_mapping(head->mapping))
2556 				shmem_uncharge(head->mapping->host, 1);
2557 			else if (folio_test_clear_dirty(tail))
2558 				folio_account_cleaned(tail,
2559 					inode_to_wb(folio->mapping->host));
2560 			__filemap_remove_folio(tail, NULL);
2561 			folio_put(tail);
2562 		} else if (!PageAnon(page)) {
2563 			__xa_store(&head->mapping->i_pages, head[i].index,
2564 					head + i, 0);
2565 		} else if (swap_cache) {
2566 			__xa_store(&swap_cache->i_pages, offset + i,
2567 					head + i, 0);
2568 		}
2569 	}
2570 
2571 	ClearPageCompound(head);
2572 	unlock_page_lruvec(lruvec);
2573 	/* Caller disabled irqs, so they are still disabled here */
2574 
2575 	split_page_owner(head, nr);
2576 
2577 	/* See comment in __split_huge_page_tail() */
2578 	if (PageAnon(head)) {
2579 		/* Additional pin to swap cache */
2580 		if (PageSwapCache(head)) {
2581 			page_ref_add(head, 2);
2582 			xa_unlock(&swap_cache->i_pages);
2583 		} else {
2584 			page_ref_inc(head);
2585 		}
2586 	} else {
2587 		/* Additional pin to page cache */
2588 		page_ref_add(head, 2);
2589 		xa_unlock(&head->mapping->i_pages);
2590 	}
2591 	local_irq_enable();
2592 
2593 	remap_page(folio, nr);
2594 
2595 	if (PageSwapCache(head)) {
2596 		swp_entry_t entry = { .val = page_private(head) };
2597 
2598 		split_swap_cluster(entry);
2599 	}
2600 
2601 	for (i = 0; i < nr; i++) {
2602 		struct page *subpage = head + i;
2603 		if (subpage == page)
2604 			continue;
2605 		unlock_page(subpage);
2606 
2607 		/*
2608 		 * Subpages may be freed if there wasn't any mapping
2609 		 * like if add_to_swap() is running on a lru page that
2610 		 * had its mapping zapped. And freeing these pages
2611 		 * requires taking the lru_lock so we do the put_page
2612 		 * of the tail pages after the split is complete.
2613 		 */
2614 		free_page_and_swap_cache(subpage);
2615 	}
2616 }
2617 
2618 /* Racy check whether the huge page can be split */
2619 bool can_split_folio(struct folio *folio, int *pextra_pins)
2620 {
2621 	int extra_pins;
2622 
2623 	/* Additional pins from page cache */
2624 	if (folio_test_anon(folio))
2625 		extra_pins = folio_test_swapcache(folio) ?
2626 				folio_nr_pages(folio) : 0;
2627 	else
2628 		extra_pins = folio_nr_pages(folio);
2629 	if (pextra_pins)
2630 		*pextra_pins = extra_pins;
2631 	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
2632 }
2633 
2634 /*
2635  * This function splits huge page into normal pages. @page can point to any
2636  * subpage of huge page to split. Split doesn't change the position of @page.
2637  *
2638  * Only caller must hold pin on the @page, otherwise split fails with -EBUSY.
2639  * The huge page must be locked.
2640  *
2641  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2642  *
2643  * Both head page and tail pages will inherit mapping, flags, and so on from
2644  * the hugepage.
2645  *
2646  * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
2647  * they are not mapped.
2648  *
2649  * Returns 0 if the hugepage is split successfully.
2650  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2651  * us.
2652  */
2653 int split_huge_page_to_list(struct page *page, struct list_head *list)
2654 {
2655 	struct folio *folio = page_folio(page);
2656 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2657 	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
2658 	struct anon_vma *anon_vma = NULL;
2659 	struct address_space *mapping = NULL;
2660 	int extra_pins, ret;
2661 	pgoff_t end;
2662 	bool is_hzp;
2663 
2664 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2665 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2666 
2667 	is_hzp = is_huge_zero_page(&folio->page);
2668 	VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
2669 	if (is_hzp)
2670 		return -EBUSY;
2671 
2672 	if (folio_test_writeback(folio))
2673 		return -EBUSY;
2674 
2675 	if (folio_test_anon(folio)) {
2676 		/*
2677 		 * The caller does not necessarily hold an mmap_lock that would
2678 		 * prevent the anon_vma disappearing so we first we take a
2679 		 * reference to it and then lock the anon_vma for write. This
2680 		 * is similar to folio_lock_anon_vma_read except the write lock
2681 		 * is taken to serialise against parallel split or collapse
2682 		 * operations.
2683 		 */
2684 		anon_vma = folio_get_anon_vma(folio);
2685 		if (!anon_vma) {
2686 			ret = -EBUSY;
2687 			goto out;
2688 		}
2689 		end = -1;
2690 		mapping = NULL;
2691 		anon_vma_lock_write(anon_vma);
2692 	} else {
2693 		gfp_t gfp;
2694 
2695 		mapping = folio->mapping;
2696 
2697 		/* Truncated ? */
2698 		if (!mapping) {
2699 			ret = -EBUSY;
2700 			goto out;
2701 		}
2702 
2703 		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
2704 							GFP_RECLAIM_MASK);
2705 
2706 		if (folio_test_private(folio) &&
2707 				!filemap_release_folio(folio, gfp)) {
2708 			ret = -EBUSY;
2709 			goto out;
2710 		}
2711 
2712 		xas_split_alloc(&xas, folio, folio_order(folio), gfp);
2713 		if (xas_error(&xas)) {
2714 			ret = xas_error(&xas);
2715 			goto out;
2716 		}
2717 
2718 		anon_vma = NULL;
2719 		i_mmap_lock_read(mapping);
2720 
2721 		/*
2722 		 *__split_huge_page() may need to trim off pages beyond EOF:
2723 		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2724 		 * which cannot be nested inside the page tree lock. So note
2725 		 * end now: i_size itself may be changed at any moment, but
2726 		 * folio lock is good enough to serialize the trimming.
2727 		 */
2728 		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2729 		if (shmem_mapping(mapping))
2730 			end = shmem_fallocend(mapping->host, end);
2731 	}
2732 
2733 	/*
2734 	 * Racy check if we can split the page, before unmap_folio() will
2735 	 * split PMDs
2736 	 */
2737 	if (!can_split_folio(folio, &extra_pins)) {
2738 		ret = -EAGAIN;
2739 		goto out_unlock;
2740 	}
2741 
2742 	unmap_folio(folio);
2743 
2744 	/* block interrupt reentry in xa_lock and spinlock */
2745 	local_irq_disable();
2746 	if (mapping) {
2747 		/*
2748 		 * Check if the folio is present in page cache.
2749 		 * We assume all tail are present too, if folio is there.
2750 		 */
2751 		xas_lock(&xas);
2752 		xas_reset(&xas);
2753 		if (xas_load(&xas) != folio)
2754 			goto fail;
2755 	}
2756 
2757 	/* Prevent deferred_split_scan() touching ->_refcount */
2758 	spin_lock(&ds_queue->split_queue_lock);
2759 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
2760 		if (!list_empty(&folio->_deferred_list)) {
2761 			ds_queue->split_queue_len--;
2762 			list_del(&folio->_deferred_list);
2763 		}
2764 		spin_unlock(&ds_queue->split_queue_lock);
2765 		if (mapping) {
2766 			int nr = folio_nr_pages(folio);
2767 
2768 			xas_split(&xas, folio, folio_order(folio));
2769 			if (folio_test_swapbacked(folio)) {
2770 				__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
2771 							-nr);
2772 			} else {
2773 				__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
2774 							-nr);
2775 				filemap_nr_thps_dec(mapping);
2776 			}
2777 		}
2778 
2779 		__split_huge_page(page, list, end);
2780 		ret = 0;
2781 	} else {
2782 		spin_unlock(&ds_queue->split_queue_lock);
2783 fail:
2784 		if (mapping)
2785 			xas_unlock(&xas);
2786 		local_irq_enable();
2787 		remap_page(folio, folio_nr_pages(folio));
2788 		ret = -EAGAIN;
2789 	}
2790 
2791 out_unlock:
2792 	if (anon_vma) {
2793 		anon_vma_unlock_write(anon_vma);
2794 		put_anon_vma(anon_vma);
2795 	}
2796 	if (mapping)
2797 		i_mmap_unlock_read(mapping);
2798 out:
2799 	xas_destroy(&xas);
2800 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2801 	return ret;
2802 }
2803 
2804 void free_transhuge_page(struct page *page)
2805 {
2806 	struct folio *folio = (struct folio *)page;
2807 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2808 	unsigned long flags;
2809 
2810 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2811 	if (!list_empty(&folio->_deferred_list)) {
2812 		ds_queue->split_queue_len--;
2813 		list_del(&folio->_deferred_list);
2814 	}
2815 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2816 	free_compound_page(page);
2817 }
2818 
2819 void deferred_split_folio(struct folio *folio)
2820 {
2821 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2822 #ifdef CONFIG_MEMCG
2823 	struct mem_cgroup *memcg = folio_memcg(folio);
2824 #endif
2825 	unsigned long flags;
2826 
2827 	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
2828 
2829 	/*
2830 	 * The try_to_unmap() in page reclaim path might reach here too,
2831 	 * this may cause a race condition to corrupt deferred split queue.
2832 	 * And, if page reclaim is already handling the same folio, it is
2833 	 * unnecessary to handle it again in shrinker.
2834 	 *
2835 	 * Check the swapcache flag to determine if the folio is being
2836 	 * handled by page reclaim since THP swap would add the folio into
2837 	 * swap cache before calling try_to_unmap().
2838 	 */
2839 	if (folio_test_swapcache(folio))
2840 		return;
2841 
2842 	if (!list_empty(&folio->_deferred_list))
2843 		return;
2844 
2845 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2846 	if (list_empty(&folio->_deferred_list)) {
2847 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2848 		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
2849 		ds_queue->split_queue_len++;
2850 #ifdef CONFIG_MEMCG
2851 		if (memcg)
2852 			set_shrinker_bit(memcg, folio_nid(folio),
2853 					 deferred_split_shrinker.id);
2854 #endif
2855 	}
2856 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2857 }
2858 
2859 static unsigned long deferred_split_count(struct shrinker *shrink,
2860 		struct shrink_control *sc)
2861 {
2862 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2863 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2864 
2865 #ifdef CONFIG_MEMCG
2866 	if (sc->memcg)
2867 		ds_queue = &sc->memcg->deferred_split_queue;
2868 #endif
2869 	return READ_ONCE(ds_queue->split_queue_len);
2870 }
2871 
2872 static unsigned long deferred_split_scan(struct shrinker *shrink,
2873 		struct shrink_control *sc)
2874 {
2875 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2876 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2877 	unsigned long flags;
2878 	LIST_HEAD(list);
2879 	struct folio *folio, *next;
2880 	int split = 0;
2881 
2882 #ifdef CONFIG_MEMCG
2883 	if (sc->memcg)
2884 		ds_queue = &sc->memcg->deferred_split_queue;
2885 #endif
2886 
2887 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2888 	/* Take pin on all head pages to avoid freeing them under us */
2889 	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
2890 							_deferred_list) {
2891 		if (folio_try_get(folio)) {
2892 			list_move(&folio->_deferred_list, &list);
2893 		} else {
2894 			/* We lost race with folio_put() */
2895 			list_del_init(&folio->_deferred_list);
2896 			ds_queue->split_queue_len--;
2897 		}
2898 		if (!--sc->nr_to_scan)
2899 			break;
2900 	}
2901 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2902 
2903 	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
2904 		if (!folio_trylock(folio))
2905 			goto next;
2906 		/* split_huge_page() removes page from list on success */
2907 		if (!split_folio(folio))
2908 			split++;
2909 		folio_unlock(folio);
2910 next:
2911 		folio_put(folio);
2912 	}
2913 
2914 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2915 	list_splice_tail(&list, &ds_queue->split_queue);
2916 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2917 
2918 	/*
2919 	 * Stop shrinker if we didn't split any page, but the queue is empty.
2920 	 * This can happen if pages were freed under us.
2921 	 */
2922 	if (!split && list_empty(&ds_queue->split_queue))
2923 		return SHRINK_STOP;
2924 	return split;
2925 }
2926 
2927 static struct shrinker deferred_split_shrinker = {
2928 	.count_objects = deferred_split_count,
2929 	.scan_objects = deferred_split_scan,
2930 	.seeks = DEFAULT_SEEKS,
2931 	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
2932 		 SHRINKER_NONSLAB,
2933 };
2934 
2935 #ifdef CONFIG_DEBUG_FS
2936 static void split_huge_pages_all(void)
2937 {
2938 	struct zone *zone;
2939 	struct page *page;
2940 	struct folio *folio;
2941 	unsigned long pfn, max_zone_pfn;
2942 	unsigned long total = 0, split = 0;
2943 
2944 	pr_debug("Split all THPs\n");
2945 	for_each_zone(zone) {
2946 		if (!managed_zone(zone))
2947 			continue;
2948 		max_zone_pfn = zone_end_pfn(zone);
2949 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
2950 			int nr_pages;
2951 
2952 			page = pfn_to_online_page(pfn);
2953 			if (!page || PageTail(page))
2954 				continue;
2955 			folio = page_folio(page);
2956 			if (!folio_try_get(folio))
2957 				continue;
2958 
2959 			if (unlikely(page_folio(page) != folio))
2960 				goto next;
2961 
2962 			if (zone != folio_zone(folio))
2963 				goto next;
2964 
2965 			if (!folio_test_large(folio)
2966 				|| folio_test_hugetlb(folio)
2967 				|| !folio_test_lru(folio))
2968 				goto next;
2969 
2970 			total++;
2971 			folio_lock(folio);
2972 			nr_pages = folio_nr_pages(folio);
2973 			if (!split_folio(folio))
2974 				split++;
2975 			pfn += nr_pages - 1;
2976 			folio_unlock(folio);
2977 next:
2978 			folio_put(folio);
2979 			cond_resched();
2980 		}
2981 	}
2982 
2983 	pr_debug("%lu of %lu THP split\n", split, total);
2984 }
2985 
2986 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
2987 {
2988 	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
2989 		    is_vm_hugetlb_page(vma);
2990 }
2991 
2992 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
2993 				unsigned long vaddr_end)
2994 {
2995 	int ret = 0;
2996 	struct task_struct *task;
2997 	struct mm_struct *mm;
2998 	unsigned long total = 0, split = 0;
2999 	unsigned long addr;
3000 
3001 	vaddr_start &= PAGE_MASK;
3002 	vaddr_end &= PAGE_MASK;
3003 
3004 	/* Find the task_struct from pid */
3005 	rcu_read_lock();
3006 	task = find_task_by_vpid(pid);
3007 	if (!task) {
3008 		rcu_read_unlock();
3009 		ret = -ESRCH;
3010 		goto out;
3011 	}
3012 	get_task_struct(task);
3013 	rcu_read_unlock();
3014 
3015 	/* Find the mm_struct */
3016 	mm = get_task_mm(task);
3017 	put_task_struct(task);
3018 
3019 	if (!mm) {
3020 		ret = -EINVAL;
3021 		goto out;
3022 	}
3023 
3024 	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3025 		 pid, vaddr_start, vaddr_end);
3026 
3027 	mmap_read_lock(mm);
3028 	/*
3029 	 * always increase addr by PAGE_SIZE, since we could have a PTE page
3030 	 * table filled with PTE-mapped THPs, each of which is distinct.
3031 	 */
3032 	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3033 		struct vm_area_struct *vma = vma_lookup(mm, addr);
3034 		struct page *page;
3035 
3036 		if (!vma)
3037 			break;
3038 
3039 		/* skip special VMA and hugetlb VMA */
3040 		if (vma_not_suitable_for_thp_split(vma)) {
3041 			addr = vma->vm_end;
3042 			continue;
3043 		}
3044 
3045 		/* FOLL_DUMP to ignore special (like zero) pages */
3046 		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3047 
3048 		if (IS_ERR_OR_NULL(page))
3049 			continue;
3050 
3051 		if (!is_transparent_hugepage(page))
3052 			goto next;
3053 
3054 		total++;
3055 		if (!can_split_folio(page_folio(page), NULL))
3056 			goto next;
3057 
3058 		if (!trylock_page(page))
3059 			goto next;
3060 
3061 		if (!split_huge_page(page))
3062 			split++;
3063 
3064 		unlock_page(page);
3065 next:
3066 		put_page(page);
3067 		cond_resched();
3068 	}
3069 	mmap_read_unlock(mm);
3070 	mmput(mm);
3071 
3072 	pr_debug("%lu of %lu THP split\n", split, total);
3073 
3074 out:
3075 	return ret;
3076 }
3077 
3078 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3079 				pgoff_t off_end)
3080 {
3081 	struct filename *file;
3082 	struct file *candidate;
3083 	struct address_space *mapping;
3084 	int ret = -EINVAL;
3085 	pgoff_t index;
3086 	int nr_pages = 1;
3087 	unsigned long total = 0, split = 0;
3088 
3089 	file = getname_kernel(file_path);
3090 	if (IS_ERR(file))
3091 		return ret;
3092 
3093 	candidate = file_open_name(file, O_RDONLY, 0);
3094 	if (IS_ERR(candidate))
3095 		goto out;
3096 
3097 	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3098 		 file_path, off_start, off_end);
3099 
3100 	mapping = candidate->f_mapping;
3101 
3102 	for (index = off_start; index < off_end; index += nr_pages) {
3103 		struct folio *folio = __filemap_get_folio(mapping, index,
3104 						FGP_ENTRY, 0);
3105 
3106 		nr_pages = 1;
3107 		if (xa_is_value(folio) || !folio)
3108 			continue;
3109 
3110 		if (!folio_test_large(folio))
3111 			goto next;
3112 
3113 		total++;
3114 		nr_pages = folio_nr_pages(folio);
3115 
3116 		if (!folio_trylock(folio))
3117 			goto next;
3118 
3119 		if (!split_folio(folio))
3120 			split++;
3121 
3122 		folio_unlock(folio);
3123 next:
3124 		folio_put(folio);
3125 		cond_resched();
3126 	}
3127 
3128 	filp_close(candidate, NULL);
3129 	ret = 0;
3130 
3131 	pr_debug("%lu of %lu file-backed THP split\n", split, total);
3132 out:
3133 	putname(file);
3134 	return ret;
3135 }
3136 
3137 #define MAX_INPUT_BUF_SZ 255
3138 
3139 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3140 				size_t count, loff_t *ppops)
3141 {
3142 	static DEFINE_MUTEX(split_debug_mutex);
3143 	ssize_t ret;
3144 	/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
3145 	char input_buf[MAX_INPUT_BUF_SZ];
3146 	int pid;
3147 	unsigned long vaddr_start, vaddr_end;
3148 
3149 	ret = mutex_lock_interruptible(&split_debug_mutex);
3150 	if (ret)
3151 		return ret;
3152 
3153 	ret = -EFAULT;
3154 
3155 	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3156 	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3157 		goto out;
3158 
3159 	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3160 
3161 	if (input_buf[0] == '/') {
3162 		char *tok;
3163 		char *buf = input_buf;
3164 		char file_path[MAX_INPUT_BUF_SZ];
3165 		pgoff_t off_start = 0, off_end = 0;
3166 		size_t input_len = strlen(input_buf);
3167 
3168 		tok = strsep(&buf, ",");
3169 		if (tok) {
3170 			strcpy(file_path, tok);
3171 		} else {
3172 			ret = -EINVAL;
3173 			goto out;
3174 		}
3175 
3176 		ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3177 		if (ret != 2) {
3178 			ret = -EINVAL;
3179 			goto out;
3180 		}
3181 		ret = split_huge_pages_in_file(file_path, off_start, off_end);
3182 		if (!ret)
3183 			ret = input_len;
3184 
3185 		goto out;
3186 	}
3187 
3188 	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3189 	if (ret == 1 && pid == 1) {
3190 		split_huge_pages_all();
3191 		ret = strlen(input_buf);
3192 		goto out;
3193 	} else if (ret != 3) {
3194 		ret = -EINVAL;
3195 		goto out;
3196 	}
3197 
3198 	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3199 	if (!ret)
3200 		ret = strlen(input_buf);
3201 out:
3202 	mutex_unlock(&split_debug_mutex);
3203 	return ret;
3204 
3205 }
3206 
3207 static const struct file_operations split_huge_pages_fops = {
3208 	.owner	 = THIS_MODULE,
3209 	.write	 = split_huge_pages_write,
3210 	.llseek  = no_llseek,
3211 };
3212 
3213 static int __init split_huge_pages_debugfs(void)
3214 {
3215 	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3216 			    &split_huge_pages_fops);
3217 	return 0;
3218 }
3219 late_initcall(split_huge_pages_debugfs);
3220 #endif
3221 
3222 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
3223 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3224 		struct page *page)
3225 {
3226 	struct vm_area_struct *vma = pvmw->vma;
3227 	struct mm_struct *mm = vma->vm_mm;
3228 	unsigned long address = pvmw->address;
3229 	bool anon_exclusive;
3230 	pmd_t pmdval;
3231 	swp_entry_t entry;
3232 	pmd_t pmdswp;
3233 
3234 	if (!(pvmw->pmd && !pvmw->pte))
3235 		return 0;
3236 
3237 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3238 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3239 
3240 	/* See page_try_share_anon_rmap(): invalidate PMD first. */
3241 	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
3242 	if (anon_exclusive && page_try_share_anon_rmap(page)) {
3243 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
3244 		return -EBUSY;
3245 	}
3246 
3247 	if (pmd_dirty(pmdval))
3248 		set_page_dirty(page);
3249 	if (pmd_write(pmdval))
3250 		entry = make_writable_migration_entry(page_to_pfn(page));
3251 	else if (anon_exclusive)
3252 		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3253 	else
3254 		entry = make_readable_migration_entry(page_to_pfn(page));
3255 	if (pmd_young(pmdval))
3256 		entry = make_migration_entry_young(entry);
3257 	if (pmd_dirty(pmdval))
3258 		entry = make_migration_entry_dirty(entry);
3259 	pmdswp = swp_entry_to_pmd(entry);
3260 	if (pmd_soft_dirty(pmdval))
3261 		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3262 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3263 	page_remove_rmap(page, vma, true);
3264 	put_page(page);
3265 	trace_set_migration_pmd(address, pmd_val(pmdswp));
3266 
3267 	return 0;
3268 }
3269 
3270 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3271 {
3272 	struct vm_area_struct *vma = pvmw->vma;
3273 	struct mm_struct *mm = vma->vm_mm;
3274 	unsigned long address = pvmw->address;
3275 	unsigned long haddr = address & HPAGE_PMD_MASK;
3276 	pmd_t pmde;
3277 	swp_entry_t entry;
3278 
3279 	if (!(pvmw->pmd && !pvmw->pte))
3280 		return;
3281 
3282 	entry = pmd_to_swp_entry(*pvmw->pmd);
3283 	get_page(new);
3284 	pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
3285 	if (pmd_swp_soft_dirty(*pvmw->pmd))
3286 		pmde = pmd_mksoft_dirty(pmde);
3287 	if (is_writable_migration_entry(entry))
3288 		pmde = maybe_pmd_mkwrite(pmde, vma);
3289 	if (pmd_swp_uffd_wp(*pvmw->pmd))
3290 		pmde = pmd_mkuffd_wp(pmde);
3291 	if (!is_migration_entry_young(entry))
3292 		pmde = pmd_mkold(pmde);
3293 	/* NOTE: this may contain setting soft-dirty on some archs */
3294 	if (PageDirty(new) && is_migration_entry_dirty(entry))
3295 		pmde = pmd_mkdirty(pmde);
3296 
3297 	if (PageAnon(new)) {
3298 		rmap_t rmap_flags = RMAP_COMPOUND;
3299 
3300 		if (!is_readable_migration_entry(entry))
3301 			rmap_flags |= RMAP_EXCLUSIVE;
3302 
3303 		page_add_anon_rmap(new, vma, haddr, rmap_flags);
3304 	} else {
3305 		page_add_file_rmap(new, vma, true);
3306 	}
3307 	VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
3308 	set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3309 
3310 	/* No need to invalidate - it was non-present before */
3311 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
3312 	trace_remove_migration_pmd(address, pmd_val(pmde));
3313 }
3314 #endif
3315