xref: /linux/mm/huge_memory.c (revision 6bc0987d0b508b3768808efafa1e90041713526b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2009  Red Hat, Inc.
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/numa_balancing.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/swapops.h>
20 #include <linux/backing-dev.h>
21 #include <linux/dax.h>
22 #include <linux/mm_types.h>
23 #include <linux/khugepaged.h>
24 #include <linux/freezer.h>
25 #include <linux/mman.h>
26 #include <linux/memremap.h>
27 #include <linux/pagemap.h>
28 #include <linux/debugfs.h>
29 #include <linux/migrate.h>
30 #include <linux/hashtable.h>
31 #include <linux/userfaultfd_k.h>
32 #include <linux/page_idle.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/oom.h>
35 #include <linux/numa.h>
36 #include <linux/page_owner.h>
37 #include <linux/sched/sysctl.h>
38 #include <linux/memory-tiers.h>
39 #include <linux/compat.h>
40 #include <linux/pgalloc.h>
41 #include <linux/pgalloc_tag.h>
42 #include <linux/pagewalk.h>
43 
44 #include <asm/tlb.h>
45 #include "internal.h"
46 #include "swap.h"
47 
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/thp.h>
50 
51 /*
52  * By default, transparent hugepage support is disabled in order to avoid
53  * risking an increased memory footprint for applications that are not
54  * guaranteed to benefit from it. When transparent hugepage support is
55  * enabled, it is for all mappings, and khugepaged scans all mappings.
56  * Defrag is invoked by khugepaged hugepage allocations and by page faults
57  * for all hugepage allocations.
58  */
59 unsigned long transparent_hugepage_flags __read_mostly =
60 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
61 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
62 #endif
63 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
64 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
65 #endif
66 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
67 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
68 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
69 
70 static struct shrinker *deferred_split_shrinker;
71 static unsigned long deferred_split_count(struct shrinker *shrink,
72 					  struct shrink_control *sc);
73 static unsigned long deferred_split_scan(struct shrinker *shrink,
74 					 struct shrink_control *sc);
75 static bool split_underused_thp = true;
76 
77 static atomic_t huge_zero_refcount;
78 struct folio *huge_zero_folio __read_mostly;
79 unsigned long huge_zero_pfn __read_mostly = ~0UL;
80 unsigned long huge_anon_orders_always __read_mostly;
81 unsigned long huge_anon_orders_madvise __read_mostly;
82 unsigned long huge_anon_orders_inherit __read_mostly;
83 static bool anon_orders_configured __initdata;
84 
85 static inline bool file_thp_enabled(struct vm_area_struct *vma)
86 {
87 	struct inode *inode;
88 
89 	if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
90 		return false;
91 
92 	if (!vma->vm_file)
93 		return false;
94 
95 	inode = file_inode(vma->vm_file);
96 
97 	if (IS_ANON_FILE(inode))
98 		return false;
99 
100 	return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
101 }
102 
103 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
104 					 vm_flags_t vm_flags,
105 					 enum tva_type type,
106 					 unsigned long orders)
107 {
108 	const bool smaps = type == TVA_SMAPS;
109 	const bool in_pf = type == TVA_PAGEFAULT;
110 	const bool forced_collapse = type == TVA_FORCED_COLLAPSE;
111 	unsigned long supported_orders;
112 
113 	/* Check the intersection of requested and supported orders. */
114 	if (vma_is_anonymous(vma))
115 		supported_orders = THP_ORDERS_ALL_ANON;
116 	else if (vma_is_special_huge(vma))
117 		supported_orders = THP_ORDERS_ALL_SPECIAL;
118 	else
119 		supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
120 
121 	orders &= supported_orders;
122 	if (!orders)
123 		return 0;
124 
125 	if (!vma->vm_mm)		/* vdso */
126 		return 0;
127 
128 	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags, forced_collapse))
129 		return 0;
130 
131 	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
132 	if (vma_is_dax(vma))
133 		return in_pf ? orders : 0;
134 
135 	/*
136 	 * khugepaged special VMA and hugetlb VMA.
137 	 * Must be checked after dax since some dax mappings may have
138 	 * VM_MIXEDMAP set.
139 	 */
140 	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
141 		return 0;
142 
143 	/*
144 	 * Check alignment for file vma and size for both file and anon vma by
145 	 * filtering out the unsuitable orders.
146 	 *
147 	 * Skip the check for page fault. Huge fault does the check in fault
148 	 * handlers.
149 	 */
150 	if (!in_pf) {
151 		int order = highest_order(orders);
152 		unsigned long addr;
153 
154 		while (orders) {
155 			addr = vma->vm_end - (PAGE_SIZE << order);
156 			if (thp_vma_suitable_order(vma, addr, order))
157 				break;
158 			order = next_order(&orders, order);
159 		}
160 
161 		if (!orders)
162 			return 0;
163 	}
164 
165 	/*
166 	 * Enabled via shmem mount options or sysfs settings.
167 	 * Must be done before hugepage flags check since shmem has its
168 	 * own flags.
169 	 */
170 	if (!in_pf && shmem_file(vma->vm_file))
171 		return orders & shmem_allowable_huge_orders(file_inode(vma->vm_file),
172 						   vma, vma->vm_pgoff, 0,
173 						   forced_collapse);
174 
175 	if (!vma_is_anonymous(vma)) {
176 		/*
177 		 * Enforce THP collapse requirements as necessary. Anonymous vmas
178 		 * were already handled in thp_vma_allowable_orders().
179 		 */
180 		if (!forced_collapse &&
181 		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
182 						    !hugepage_global_always())))
183 			return 0;
184 
185 		/*
186 		 * Trust that ->huge_fault() handlers know what they are doing
187 		 * in fault path.
188 		 */
189 		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
190 			return orders;
191 		/* Only regular file is valid in collapse path */
192 		if (((!in_pf || smaps)) && file_thp_enabled(vma))
193 			return orders;
194 		return 0;
195 	}
196 
197 	if (vma_is_temporary_stack(vma))
198 		return 0;
199 
200 	/*
201 	 * THPeligible bit of smaps should show 1 for proper VMAs even
202 	 * though anon_vma is not initialized yet.
203 	 *
204 	 * Allow page fault since anon_vma may be not initialized until
205 	 * the first page fault.
206 	 */
207 	if (!vma->anon_vma)
208 		return (smaps || in_pf) ? orders : 0;
209 
210 	return orders;
211 }
212 
213 static bool get_huge_zero_folio(void)
214 {
215 	struct folio *zero_folio;
216 retry:
217 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
218 		return true;
219 
220 	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO | __GFP_ZEROTAGS) &
221 				 ~__GFP_MOVABLE,
222 			HPAGE_PMD_ORDER);
223 	if (!zero_folio) {
224 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
225 		return false;
226 	}
227 	/* Ensure zero folio won't have large_rmappable flag set. */
228 	folio_clear_large_rmappable(zero_folio);
229 	preempt_disable();
230 	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
231 		preempt_enable();
232 		folio_put(zero_folio);
233 		goto retry;
234 	}
235 	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
236 
237 	/* We take additional reference here. It will be put back by shrinker */
238 	atomic_set(&huge_zero_refcount, 2);
239 	preempt_enable();
240 	count_vm_event(THP_ZERO_PAGE_ALLOC);
241 	return true;
242 }
243 
244 static void put_huge_zero_folio(void)
245 {
246 	/*
247 	 * Counter should never go to zero here. Only shrinker can put
248 	 * last reference.
249 	 */
250 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
251 }
252 
253 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
254 {
255 	if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
256 		return huge_zero_folio;
257 
258 	if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
259 		return READ_ONCE(huge_zero_folio);
260 
261 	if (!get_huge_zero_folio())
262 		return NULL;
263 
264 	if (mm_flags_test_and_set(MMF_HUGE_ZERO_FOLIO, mm))
265 		put_huge_zero_folio();
266 
267 	return READ_ONCE(huge_zero_folio);
268 }
269 
270 void mm_put_huge_zero_folio(struct mm_struct *mm)
271 {
272 	if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
273 		return;
274 
275 	if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
276 		put_huge_zero_folio();
277 }
278 
279 static unsigned long shrink_huge_zero_folio_count(struct shrinker *shrink,
280 						  struct shrink_control *sc)
281 {
282 	/* we can free zero page only if last reference remains */
283 	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
284 }
285 
286 static unsigned long shrink_huge_zero_folio_scan(struct shrinker *shrink,
287 						 struct shrink_control *sc)
288 {
289 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
290 		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
291 		BUG_ON(zero_folio == NULL);
292 		WRITE_ONCE(huge_zero_pfn, ~0UL);
293 		folio_put(zero_folio);
294 		return HPAGE_PMD_NR;
295 	}
296 
297 	return 0;
298 }
299 
300 static struct shrinker *huge_zero_folio_shrinker;
301 
302 #ifdef CONFIG_SYSFS
303 static ssize_t enabled_show(struct kobject *kobj,
304 			    struct kobj_attribute *attr, char *buf)
305 {
306 	const char *output;
307 
308 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
309 		output = "[always] madvise never";
310 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
311 			  &transparent_hugepage_flags))
312 		output = "always [madvise] never";
313 	else
314 		output = "always madvise [never]";
315 
316 	return sysfs_emit(buf, "%s\n", output);
317 }
318 
319 enum anon_enabled_mode {
320 	ANON_ENABLED_ALWAYS	= 0,
321 	ANON_ENABLED_INHERIT	= 1,
322 	ANON_ENABLED_MADVISE	= 2,
323 	ANON_ENABLED_NEVER	= 3,
324 };
325 
326 static const char * const anon_enabled_mode_strings[] = {
327 	[ANON_ENABLED_ALWAYS]	= "always",
328 	[ANON_ENABLED_INHERIT]	= "inherit",
329 	[ANON_ENABLED_MADVISE]	= "madvise",
330 	[ANON_ENABLED_NEVER]	= "never",
331 };
332 
333 enum global_enabled_mode {
334 	GLOBAL_ENABLED_ALWAYS	= 0,
335 	GLOBAL_ENABLED_MADVISE	= 1,
336 	GLOBAL_ENABLED_NEVER	= 2,
337 };
338 
339 static const char * const global_enabled_mode_strings[] = {
340 	[GLOBAL_ENABLED_ALWAYS]		= "always",
341 	[GLOBAL_ENABLED_MADVISE]	= "madvise",
342 	[GLOBAL_ENABLED_NEVER]		= "never",
343 };
344 
345 static bool set_global_enabled_mode(enum global_enabled_mode mode)
346 {
347 	static const unsigned long thp_flags[] = {
348 		TRANSPARENT_HUGEPAGE_FLAG,
349 		TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
350 	};
351 	enum global_enabled_mode m;
352 	bool changed = false;
353 
354 	for (m = 0; m < ARRAY_SIZE(thp_flags); m++) {
355 		if (m == mode)
356 			changed |= !test_and_set_bit(thp_flags[m],
357 						     &transparent_hugepage_flags);
358 		else
359 			changed |= test_and_clear_bit(thp_flags[m],
360 						      &transparent_hugepage_flags);
361 	}
362 
363 	return changed;
364 }
365 
366 static ssize_t enabled_store(struct kobject *kobj,
367 			     struct kobj_attribute *attr,
368 			     const char *buf, size_t count)
369 {
370 	int mode;
371 
372 	mode = sysfs_match_string(global_enabled_mode_strings, buf);
373 	if (mode < 0)
374 		return -EINVAL;
375 
376 	if (set_global_enabled_mode(mode)) {
377 		int err = start_stop_khugepaged();
378 
379 		if (err)
380 			return err;
381 	} else {
382 		/*
383 		 * Recalculate watermarks even when the mode didn't
384 		 * change, as the previous code always called
385 		 * start_stop_khugepaged() which does this internally.
386 		 */
387 		set_recommended_min_free_kbytes();
388 	}
389 	return count;
390 }
391 
392 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
393 
394 ssize_t single_hugepage_flag_show(struct kobject *kobj,
395 				  struct kobj_attribute *attr, char *buf,
396 				  enum transparent_hugepage_flag flag)
397 {
398 	return sysfs_emit(buf, "%d\n",
399 			  !!test_bit(flag, &transparent_hugepage_flags));
400 }
401 
402 ssize_t single_hugepage_flag_store(struct kobject *kobj,
403 				 struct kobj_attribute *attr,
404 				 const char *buf, size_t count,
405 				 enum transparent_hugepage_flag flag)
406 {
407 	unsigned long value;
408 	int ret;
409 
410 	ret = kstrtoul(buf, 10, &value);
411 	if (ret < 0)
412 		return ret;
413 	if (value > 1)
414 		return -EINVAL;
415 
416 	if (value)
417 		set_bit(flag, &transparent_hugepage_flags);
418 	else
419 		clear_bit(flag, &transparent_hugepage_flags);
420 
421 	return count;
422 }
423 
424 static ssize_t defrag_show(struct kobject *kobj,
425 			   struct kobj_attribute *attr, char *buf)
426 {
427 	const char *output;
428 
429 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
430 		     &transparent_hugepage_flags))
431 		output = "[always] defer defer+madvise madvise never";
432 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
433 			  &transparent_hugepage_flags))
434 		output = "always [defer] defer+madvise madvise never";
435 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
436 			  &transparent_hugepage_flags))
437 		output = "always defer [defer+madvise] madvise never";
438 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
439 			  &transparent_hugepage_flags))
440 		output = "always defer defer+madvise [madvise] never";
441 	else
442 		output = "always defer defer+madvise madvise [never]";
443 
444 	return sysfs_emit(buf, "%s\n", output);
445 }
446 
447 static ssize_t defrag_store(struct kobject *kobj,
448 			    struct kobj_attribute *attr,
449 			    const char *buf, size_t count)
450 {
451 	if (sysfs_streq(buf, "always")) {
452 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
453 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
454 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
455 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
456 	} else if (sysfs_streq(buf, "defer+madvise")) {
457 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
458 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
459 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
460 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
461 	} else if (sysfs_streq(buf, "defer")) {
462 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
463 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
464 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
465 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
466 	} else if (sysfs_streq(buf, "madvise")) {
467 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
468 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
469 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
470 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
471 	} else if (sysfs_streq(buf, "never")) {
472 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
473 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
474 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
475 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
476 	} else
477 		return -EINVAL;
478 
479 	return count;
480 }
481 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
482 
483 static ssize_t use_zero_page_show(struct kobject *kobj,
484 				  struct kobj_attribute *attr, char *buf)
485 {
486 	return single_hugepage_flag_show(kobj, attr, buf,
487 					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
488 }
489 static ssize_t use_zero_page_store(struct kobject *kobj,
490 		struct kobj_attribute *attr, const char *buf, size_t count)
491 {
492 	return single_hugepage_flag_store(kobj, attr, buf, count,
493 				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
494 }
495 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
496 
497 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
498 				   struct kobj_attribute *attr, char *buf)
499 {
500 	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
501 }
502 static struct kobj_attribute hpage_pmd_size_attr =
503 	__ATTR_RO(hpage_pmd_size);
504 
505 static ssize_t split_underused_thp_show(struct kobject *kobj,
506 			    struct kobj_attribute *attr, char *buf)
507 {
508 	return sysfs_emit(buf, "%d\n", split_underused_thp);
509 }
510 
511 static ssize_t split_underused_thp_store(struct kobject *kobj,
512 			     struct kobj_attribute *attr,
513 			     const char *buf, size_t count)
514 {
515 	int err = kstrtobool(buf, &split_underused_thp);
516 
517 	if (err < 0)
518 		return err;
519 
520 	return count;
521 }
522 
523 static struct kobj_attribute split_underused_thp_attr = __ATTR(
524 	shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
525 
526 static struct attribute *hugepage_attr[] = {
527 	&enabled_attr.attr,
528 	&defrag_attr.attr,
529 	&use_zero_page_attr.attr,
530 	&hpage_pmd_size_attr.attr,
531 #ifdef CONFIG_SHMEM
532 	&shmem_enabled_attr.attr,
533 #endif
534 	&split_underused_thp_attr.attr,
535 	NULL,
536 };
537 
538 static const struct attribute_group hugepage_attr_group = {
539 	.attrs = hugepage_attr,
540 };
541 
542 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
543 static void thpsize_release(struct kobject *kobj);
544 static DEFINE_SPINLOCK(huge_anon_orders_lock);
545 static LIST_HEAD(thpsize_list);
546 
547 static ssize_t anon_enabled_show(struct kobject *kobj,
548 				 struct kobj_attribute *attr, char *buf)
549 {
550 	int order = to_thpsize(kobj)->order;
551 	const char *output;
552 
553 	if (test_bit(order, &huge_anon_orders_always))
554 		output = "[always] inherit madvise never";
555 	else if (test_bit(order, &huge_anon_orders_inherit))
556 		output = "always [inherit] madvise never";
557 	else if (test_bit(order, &huge_anon_orders_madvise))
558 		output = "always inherit [madvise] never";
559 	else
560 		output = "always inherit madvise [never]";
561 
562 	return sysfs_emit(buf, "%s\n", output);
563 }
564 
565 static bool set_anon_enabled_mode(int order, enum anon_enabled_mode mode)
566 {
567 	static unsigned long *enabled_orders[] = {
568 		&huge_anon_orders_always,
569 		&huge_anon_orders_inherit,
570 		&huge_anon_orders_madvise,
571 	};
572 	enum anon_enabled_mode m;
573 	bool changed = false;
574 
575 	spin_lock(&huge_anon_orders_lock);
576 	for (m = 0; m < ARRAY_SIZE(enabled_orders); m++) {
577 		if (m == mode)
578 			changed |= !__test_and_set_bit(order, enabled_orders[m]);
579 		else
580 			changed |= __test_and_clear_bit(order, enabled_orders[m]);
581 	}
582 	spin_unlock(&huge_anon_orders_lock);
583 
584 	return changed;
585 }
586 
587 static ssize_t anon_enabled_store(struct kobject *kobj,
588 				  struct kobj_attribute *attr,
589 				  const char *buf, size_t count)
590 {
591 	int order = to_thpsize(kobj)->order;
592 	int mode;
593 
594 	mode = sysfs_match_string(anon_enabled_mode_strings, buf);
595 	if (mode < 0)
596 		return -EINVAL;
597 
598 	if (set_anon_enabled_mode(order, mode)) {
599 		int err = start_stop_khugepaged();
600 
601 		if (err)
602 			return err;
603 	} else {
604 		/*
605 		 * Recalculate watermarks even when the mode didn't
606 		 * change, as the previous code always called
607 		 * start_stop_khugepaged() which does this internally.
608 		 */
609 		set_recommended_min_free_kbytes();
610 	}
611 
612 	return count;
613 }
614 
615 static struct kobj_attribute anon_enabled_attr =
616 	__ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
617 
618 static struct attribute *anon_ctrl_attrs[] = {
619 	&anon_enabled_attr.attr,
620 	NULL,
621 };
622 
623 static const struct attribute_group anon_ctrl_attr_grp = {
624 	.attrs = anon_ctrl_attrs,
625 };
626 
627 static struct attribute *file_ctrl_attrs[] = {
628 #ifdef CONFIG_SHMEM
629 	&thpsize_shmem_enabled_attr.attr,
630 #endif
631 	NULL,
632 };
633 
634 static const struct attribute_group file_ctrl_attr_grp = {
635 	.attrs = file_ctrl_attrs,
636 };
637 
638 static struct attribute *any_ctrl_attrs[] = {
639 	NULL,
640 };
641 
642 static const struct attribute_group any_ctrl_attr_grp = {
643 	.attrs = any_ctrl_attrs,
644 };
645 
646 static const struct kobj_type thpsize_ktype = {
647 	.release = &thpsize_release,
648 	.sysfs_ops = &kobj_sysfs_ops,
649 };
650 
651 DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
652 
653 static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
654 {
655 	unsigned long sum = 0;
656 	int cpu;
657 
658 	for_each_possible_cpu(cpu) {
659 		struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
660 
661 		sum += this->stats[order][item];
662 	}
663 
664 	return sum;
665 }
666 
667 #define DEFINE_MTHP_STAT_ATTR(_name, _index)				\
668 static ssize_t _name##_show(struct kobject *kobj,			\
669 			struct kobj_attribute *attr, char *buf)		\
670 {									\
671 	int order = to_thpsize(kobj)->order;				\
672 									\
673 	return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index));	\
674 }									\
675 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
676 
677 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
678 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
679 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
680 DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
681 DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
682 DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
683 DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
684 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
685 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
686 #ifdef CONFIG_SHMEM
687 DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
688 DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
689 DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
690 #endif
691 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
692 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
693 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
694 DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
695 DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
696 
697 static struct attribute *anon_stats_attrs[] = {
698 	&anon_fault_alloc_attr.attr,
699 	&anon_fault_fallback_attr.attr,
700 	&anon_fault_fallback_charge_attr.attr,
701 #ifndef CONFIG_SHMEM
702 	&zswpout_attr.attr,
703 	&swpin_attr.attr,
704 	&swpin_fallback_attr.attr,
705 	&swpin_fallback_charge_attr.attr,
706 	&swpout_attr.attr,
707 	&swpout_fallback_attr.attr,
708 #endif
709 	&split_deferred_attr.attr,
710 	&nr_anon_attr.attr,
711 	&nr_anon_partially_mapped_attr.attr,
712 	NULL,
713 };
714 
715 static struct attribute_group anon_stats_attr_grp = {
716 	.name = "stats",
717 	.attrs = anon_stats_attrs,
718 };
719 
720 static struct attribute *file_stats_attrs[] = {
721 #ifdef CONFIG_SHMEM
722 	&shmem_alloc_attr.attr,
723 	&shmem_fallback_attr.attr,
724 	&shmem_fallback_charge_attr.attr,
725 #endif
726 	NULL,
727 };
728 
729 static struct attribute_group file_stats_attr_grp = {
730 	.name = "stats",
731 	.attrs = file_stats_attrs,
732 };
733 
734 static struct attribute *any_stats_attrs[] = {
735 #ifdef CONFIG_SHMEM
736 	&zswpout_attr.attr,
737 	&swpin_attr.attr,
738 	&swpin_fallback_attr.attr,
739 	&swpin_fallback_charge_attr.attr,
740 	&swpout_attr.attr,
741 	&swpout_fallback_attr.attr,
742 #endif
743 	&split_attr.attr,
744 	&split_failed_attr.attr,
745 	NULL,
746 };
747 
748 static struct attribute_group any_stats_attr_grp = {
749 	.name = "stats",
750 	.attrs = any_stats_attrs,
751 };
752 
753 static int sysfs_add_group(struct kobject *kobj,
754 			   const struct attribute_group *grp)
755 {
756 	int ret = -ENOENT;
757 
758 	/*
759 	 * If the group is named, try to merge first, assuming the subdirectory
760 	 * was already created. This avoids the warning emitted by
761 	 * sysfs_create_group() if the directory already exists.
762 	 */
763 	if (grp->name)
764 		ret = sysfs_merge_group(kobj, grp);
765 	if (ret)
766 		ret = sysfs_create_group(kobj, grp);
767 
768 	return ret;
769 }
770 
771 static struct thpsize *thpsize_create(int order, struct kobject *parent)
772 {
773 	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
774 	struct thpsize *thpsize;
775 	int ret = -ENOMEM;
776 
777 	thpsize = kzalloc_obj(*thpsize);
778 	if (!thpsize)
779 		goto err;
780 
781 	thpsize->order = order;
782 
783 	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
784 				   "hugepages-%lukB", size);
785 	if (ret) {
786 		kfree(thpsize);
787 		goto err;
788 	}
789 
790 
791 	ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
792 	if (ret)
793 		goto err_put;
794 
795 	ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
796 	if (ret)
797 		goto err_put;
798 
799 	if (BIT(order) & THP_ORDERS_ALL_ANON) {
800 		ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
801 		if (ret)
802 			goto err_put;
803 
804 		ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
805 		if (ret)
806 			goto err_put;
807 	}
808 
809 	if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
810 		ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
811 		if (ret)
812 			goto err_put;
813 
814 		ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
815 		if (ret)
816 			goto err_put;
817 	}
818 
819 	return thpsize;
820 err_put:
821 	kobject_put(&thpsize->kobj);
822 err:
823 	return ERR_PTR(ret);
824 }
825 
826 static void thpsize_release(struct kobject *kobj)
827 {
828 	kfree(to_thpsize(kobj));
829 }
830 
831 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
832 {
833 	int err;
834 	struct thpsize *thpsize;
835 	unsigned long orders;
836 	int order;
837 
838 	/*
839 	 * Default to setting PMD-sized THP to inherit the global setting and
840 	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
841 	 * constant so we have to do this here.
842 	 */
843 	if (!anon_orders_configured)
844 		huge_anon_orders_inherit = BIT(PMD_ORDER);
845 
846 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
847 	if (unlikely(!*hugepage_kobj)) {
848 		pr_err("failed to create transparent hugepage kobject\n");
849 		return -ENOMEM;
850 	}
851 
852 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
853 	if (err) {
854 		pr_err("failed to register transparent hugepage group\n");
855 		goto delete_obj;
856 	}
857 
858 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
859 	if (err) {
860 		pr_err("failed to register transparent hugepage group\n");
861 		goto remove_hp_group;
862 	}
863 
864 	orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
865 	order = highest_order(orders);
866 	while (orders) {
867 		thpsize = thpsize_create(order, *hugepage_kobj);
868 		if (IS_ERR(thpsize)) {
869 			pr_err("failed to create thpsize for order %d\n", order);
870 			err = PTR_ERR(thpsize);
871 			goto remove_all;
872 		}
873 		list_add(&thpsize->node, &thpsize_list);
874 		order = next_order(&orders, order);
875 	}
876 
877 	return 0;
878 
879 remove_all:
880 	hugepage_exit_sysfs(*hugepage_kobj);
881 	return err;
882 remove_hp_group:
883 	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
884 delete_obj:
885 	kobject_put(*hugepage_kobj);
886 	return err;
887 }
888 
889 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
890 {
891 	struct thpsize *thpsize, *tmp;
892 
893 	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
894 		list_del(&thpsize->node);
895 		kobject_put(&thpsize->kobj);
896 	}
897 
898 	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
899 	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
900 	kobject_put(hugepage_kobj);
901 }
902 #else
903 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
904 {
905 	return 0;
906 }
907 
908 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
909 {
910 }
911 #endif /* CONFIG_SYSFS */
912 
913 static int __init thp_shrinker_init(void)
914 {
915 	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
916 						 SHRINKER_MEMCG_AWARE |
917 						 SHRINKER_NONSLAB,
918 						 "thp-deferred_split");
919 	if (!deferred_split_shrinker)
920 		return -ENOMEM;
921 
922 	deferred_split_shrinker->count_objects = deferred_split_count;
923 	deferred_split_shrinker->scan_objects = deferred_split_scan;
924 	shrinker_register(deferred_split_shrinker);
925 
926 	if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO)) {
927 		/*
928 		 * Bump the reference of the huge_zero_folio and do not
929 		 * initialize the shrinker.
930 		 *
931 		 * huge_zero_folio will always be NULL on failure. We assume
932 		 * that get_huge_zero_folio() will most likely not fail as
933 		 * thp_shrinker_init() is invoked early on during boot.
934 		 */
935 		if (!get_huge_zero_folio())
936 			pr_warn("Allocating persistent huge zero folio failed\n");
937 		return 0;
938 	}
939 
940 	huge_zero_folio_shrinker = shrinker_alloc(0, "thp-zero");
941 	if (!huge_zero_folio_shrinker) {
942 		shrinker_free(deferred_split_shrinker);
943 		return -ENOMEM;
944 	}
945 
946 	huge_zero_folio_shrinker->count_objects = shrink_huge_zero_folio_count;
947 	huge_zero_folio_shrinker->scan_objects = shrink_huge_zero_folio_scan;
948 	shrinker_register(huge_zero_folio_shrinker);
949 
950 	return 0;
951 }
952 
953 static void __init thp_shrinker_exit(void)
954 {
955 	shrinker_free(huge_zero_folio_shrinker);
956 	shrinker_free(deferred_split_shrinker);
957 }
958 
959 static int __init hugepage_init(void)
960 {
961 	int err;
962 	struct kobject *hugepage_kobj;
963 
964 	if (!has_transparent_hugepage()) {
965 		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
966 		return -EINVAL;
967 	}
968 
969 	/*
970 	 * hugepages can't be allocated by the buddy allocator
971 	 */
972 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
973 
974 	err = hugepage_init_sysfs(&hugepage_kobj);
975 	if (err)
976 		goto err_sysfs;
977 
978 	err = khugepaged_init();
979 	if (err)
980 		goto err_slab;
981 
982 	err = thp_shrinker_init();
983 	if (err)
984 		goto err_shrinker;
985 
986 	/*
987 	 * By default disable transparent hugepages on smaller systems,
988 	 * where the extra memory used could hurt more than TLB overhead
989 	 * is likely to save.  The admin can still enable it through /sys.
990 	 */
991 	if (totalram_pages() < MB_TO_PAGES(512)) {
992 		transparent_hugepage_flags = 0;
993 		return 0;
994 	}
995 
996 	err = start_stop_khugepaged();
997 	if (err)
998 		goto err_khugepaged;
999 
1000 	return 0;
1001 err_khugepaged:
1002 	thp_shrinker_exit();
1003 err_shrinker:
1004 	khugepaged_destroy();
1005 err_slab:
1006 	hugepage_exit_sysfs(hugepage_kobj);
1007 err_sysfs:
1008 	return err;
1009 }
1010 subsys_initcall(hugepage_init);
1011 
1012 static int __init setup_transparent_hugepage(char *str)
1013 {
1014 	int ret = 0;
1015 	if (!str)
1016 		goto out;
1017 	if (!strcmp(str, "always")) {
1018 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
1019 			&transparent_hugepage_flags);
1020 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1021 			  &transparent_hugepage_flags);
1022 		ret = 1;
1023 	} else if (!strcmp(str, "madvise")) {
1024 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
1025 			  &transparent_hugepage_flags);
1026 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1027 			&transparent_hugepage_flags);
1028 		ret = 1;
1029 	} else if (!strcmp(str, "never")) {
1030 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
1031 			  &transparent_hugepage_flags);
1032 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1033 			  &transparent_hugepage_flags);
1034 		ret = 1;
1035 	}
1036 out:
1037 	if (!ret)
1038 		pr_warn("transparent_hugepage= cannot parse, ignored\n");
1039 	return ret;
1040 }
1041 __setup("transparent_hugepage=", setup_transparent_hugepage);
1042 
1043 static char str_dup[PAGE_SIZE] __initdata;
1044 static int __init setup_thp_anon(char *str)
1045 {
1046 	char *token, *range, *policy, *subtoken;
1047 	unsigned long always, inherit, madvise;
1048 	char *start_size, *end_size;
1049 	int start, end, nr;
1050 	char *p;
1051 
1052 	if (!str || strlen(str) + 1 > PAGE_SIZE)
1053 		goto err;
1054 	strscpy(str_dup, str);
1055 
1056 	always = huge_anon_orders_always;
1057 	madvise = huge_anon_orders_madvise;
1058 	inherit = huge_anon_orders_inherit;
1059 	p = str_dup;
1060 	while ((token = strsep(&p, ";")) != NULL) {
1061 		range = strsep(&token, ":");
1062 		policy = token;
1063 
1064 		if (!policy)
1065 			goto err;
1066 
1067 		while ((subtoken = strsep(&range, ",")) != NULL) {
1068 			if (strchr(subtoken, '-')) {
1069 				start_size = strsep(&subtoken, "-");
1070 				end_size = subtoken;
1071 
1072 				start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON);
1073 				end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON);
1074 			} else {
1075 				start_size = end_size = subtoken;
1076 				start = end = get_order_from_str(subtoken,
1077 								 THP_ORDERS_ALL_ANON);
1078 			}
1079 
1080 			if (start == -EINVAL) {
1081 				pr_err("invalid size %s in thp_anon boot parameter\n", start_size);
1082 				goto err;
1083 			}
1084 
1085 			if (end == -EINVAL) {
1086 				pr_err("invalid size %s in thp_anon boot parameter\n", end_size);
1087 				goto err;
1088 			}
1089 
1090 			if (start < 0 || end < 0 || start > end)
1091 				goto err;
1092 
1093 			nr = end - start + 1;
1094 			if (!strcmp(policy, "always")) {
1095 				bitmap_set(&always, start, nr);
1096 				bitmap_clear(&inherit, start, nr);
1097 				bitmap_clear(&madvise, start, nr);
1098 			} else if (!strcmp(policy, "madvise")) {
1099 				bitmap_set(&madvise, start, nr);
1100 				bitmap_clear(&inherit, start, nr);
1101 				bitmap_clear(&always, start, nr);
1102 			} else if (!strcmp(policy, "inherit")) {
1103 				bitmap_set(&inherit, start, nr);
1104 				bitmap_clear(&madvise, start, nr);
1105 				bitmap_clear(&always, start, nr);
1106 			} else if (!strcmp(policy, "never")) {
1107 				bitmap_clear(&inherit, start, nr);
1108 				bitmap_clear(&madvise, start, nr);
1109 				bitmap_clear(&always, start, nr);
1110 			} else {
1111 				pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
1112 				goto err;
1113 			}
1114 		}
1115 	}
1116 
1117 	huge_anon_orders_always = always;
1118 	huge_anon_orders_madvise = madvise;
1119 	huge_anon_orders_inherit = inherit;
1120 	anon_orders_configured = true;
1121 	return 1;
1122 
1123 err:
1124 	pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
1125 	return 0;
1126 }
1127 __setup("thp_anon=", setup_thp_anon);
1128 
1129 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
1130 {
1131 	if (likely(vma->vm_flags & VM_WRITE))
1132 		pmd = pmd_mkwrite(pmd, vma);
1133 	return pmd;
1134 }
1135 
1136 static struct deferred_split *split_queue_node(int nid)
1137 {
1138 	struct pglist_data *pgdata = NODE_DATA(nid);
1139 
1140 	return &pgdata->deferred_split_queue;
1141 }
1142 
1143 #ifdef CONFIG_MEMCG
1144 static inline
1145 struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
1146 					   struct deferred_split *queue)
1147 {
1148 	if (mem_cgroup_disabled())
1149 		return NULL;
1150 	if (split_queue_node(folio_nid(folio)) == queue)
1151 		return NULL;
1152 	return container_of(queue, struct mem_cgroup, deferred_split_queue);
1153 }
1154 
1155 static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
1156 {
1157 	return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
1158 }
1159 #else
1160 static inline
1161 struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
1162 					   struct deferred_split *queue)
1163 {
1164 	return NULL;
1165 }
1166 
1167 static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
1168 {
1169 	return split_queue_node(nid);
1170 }
1171 #endif
1172 
1173 static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
1174 {
1175 	struct deferred_split *queue;
1176 
1177 retry:
1178 	queue = memcg_split_queue(nid, memcg);
1179 	spin_lock(&queue->split_queue_lock);
1180 	/*
1181 	 * There is a period between setting memcg to dying and reparenting
1182 	 * deferred split queue, and during this period the THPs in the deferred
1183 	 * split queue will be hidden from the shrinker side.
1184 	 */
1185 	if (unlikely(memcg_is_dying(memcg))) {
1186 		spin_unlock(&queue->split_queue_lock);
1187 		memcg = parent_mem_cgroup(memcg);
1188 		goto retry;
1189 	}
1190 
1191 	return queue;
1192 }
1193 
1194 static struct deferred_split *
1195 split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
1196 {
1197 	struct deferred_split *queue;
1198 
1199 retry:
1200 	queue = memcg_split_queue(nid, memcg);
1201 	spin_lock_irqsave(&queue->split_queue_lock, *flags);
1202 	if (unlikely(memcg_is_dying(memcg))) {
1203 		spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
1204 		memcg = parent_mem_cgroup(memcg);
1205 		goto retry;
1206 	}
1207 
1208 	return queue;
1209 }
1210 
1211 static struct deferred_split *folio_split_queue_lock(struct folio *folio)
1212 {
1213 	return split_queue_lock(folio_nid(folio), folio_memcg(folio));
1214 }
1215 
1216 static struct deferred_split *
1217 folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
1218 {
1219 	return split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
1220 }
1221 
1222 static inline void split_queue_unlock(struct deferred_split *queue)
1223 {
1224 	spin_unlock(&queue->split_queue_lock);
1225 }
1226 
1227 static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
1228 						 unsigned long flags)
1229 {
1230 	spin_unlock_irqrestore(&queue->split_queue_lock, flags);
1231 }
1232 
1233 static inline bool is_transparent_hugepage(const struct folio *folio)
1234 {
1235 	if (!folio_test_large(folio))
1236 		return false;
1237 
1238 	return is_huge_zero_folio(folio) ||
1239 		folio_test_large_rmappable(folio);
1240 }
1241 
1242 static unsigned long __thp_get_unmapped_area(struct file *filp,
1243 		unsigned long addr, unsigned long len,
1244 		loff_t off, unsigned long flags, unsigned long size,
1245 		vm_flags_t vm_flags)
1246 {
1247 	loff_t off_end = off + len;
1248 	loff_t off_align = round_up(off, size);
1249 	unsigned long len_pad, ret, off_sub;
1250 
1251 	if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
1252 		return 0;
1253 
1254 	if (off_end <= off_align || (off_end - off_align) < size)
1255 		return 0;
1256 
1257 	len_pad = len + size;
1258 	if (len_pad < len || (off + len_pad) < off)
1259 		return 0;
1260 
1261 	ret = mm_get_unmapped_area_vmflags(filp, addr, len_pad,
1262 					   off >> PAGE_SHIFT, flags, vm_flags);
1263 
1264 	/*
1265 	 * The failure might be due to length padding. The caller will retry
1266 	 * without the padding.
1267 	 */
1268 	if (IS_ERR_VALUE(ret))
1269 		return 0;
1270 
1271 	/*
1272 	 * Do not try to align to THP boundary if allocation at the address
1273 	 * hint succeeds.
1274 	 */
1275 	if (ret == addr)
1276 		return addr;
1277 
1278 	off_sub = (off - ret) & (size - 1);
1279 
1280 	if (mm_flags_test(MMF_TOPDOWN, current->mm) && !off_sub)
1281 		return ret + size;
1282 
1283 	ret += off_sub;
1284 	return ret;
1285 }
1286 
1287 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
1288 		unsigned long len, unsigned long pgoff, unsigned long flags,
1289 		vm_flags_t vm_flags)
1290 {
1291 	unsigned long ret;
1292 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
1293 
1294 	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
1295 	if (ret)
1296 		return ret;
1297 
1298 	return mm_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags,
1299 					    vm_flags);
1300 }
1301 
1302 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
1303 		unsigned long len, unsigned long pgoff, unsigned long flags)
1304 {
1305 	return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
1306 }
1307 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
1308 
1309 static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
1310 		unsigned long addr)
1311 {
1312 	gfp_t gfp = vma_thp_gfp_mask(vma);
1313 	const int order = HPAGE_PMD_ORDER;
1314 	struct folio *folio;
1315 
1316 	folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);
1317 
1318 	if (unlikely(!folio)) {
1319 		count_vm_event(THP_FAULT_FALLBACK);
1320 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1321 		return NULL;
1322 	}
1323 
1324 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1325 	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
1326 		folio_put(folio);
1327 		count_vm_event(THP_FAULT_FALLBACK);
1328 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
1329 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1330 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
1331 		return NULL;
1332 	}
1333 	folio_throttle_swaprate(folio, gfp);
1334 
1335        /*
1336 	* When a folio is not zeroed during allocation (__GFP_ZERO not used)
1337 	* or user folios require special handling, folio_zero_user() is used to
1338 	* make sure that the page corresponding to the faulting address will be
1339 	* hot in the cache after zeroing.
1340 	*/
1341 	if (user_alloc_needs_zeroing())
1342 		folio_zero_user(folio, addr);
1343 	/*
1344 	 * The memory barrier inside __folio_mark_uptodate makes sure that
1345 	 * folio_zero_user writes become visible before the set_pmd_at()
1346 	 * write.
1347 	 */
1348 	__folio_mark_uptodate(folio);
1349 	return folio;
1350 }
1351 
1352 void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
1353 		struct vm_area_struct *vma, unsigned long haddr)
1354 {
1355 	pmd_t entry;
1356 
1357 	entry = folio_mk_pmd(folio, vma->vm_page_prot);
1358 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1359 	folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
1360 	folio_add_lru_vma(folio, vma);
1361 	set_pmd_at(vma->vm_mm, haddr, pmd, entry);
1362 	update_mmu_cache_pmd(vma, haddr, pmd);
1363 	deferred_split_folio(folio, false);
1364 }
1365 
1366 static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
1367 		struct vm_area_struct *vma, unsigned long haddr)
1368 {
1369 	map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
1370 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1371 	count_vm_event(THP_FAULT_ALLOC);
1372 	count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
1373 	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
1374 }
1375 
1376 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1377 {
1378 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1379 	struct vm_area_struct *vma = vmf->vma;
1380 	struct folio *folio;
1381 	pgtable_t pgtable;
1382 	vm_fault_t ret = 0;
1383 
1384 	folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1385 	if (unlikely(!folio))
1386 		return VM_FAULT_FALLBACK;
1387 
1388 	pgtable = pte_alloc_one(vma->vm_mm);
1389 	if (unlikely(!pgtable)) {
1390 		ret = VM_FAULT_OOM;
1391 		goto release;
1392 	}
1393 
1394 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1395 	if (unlikely(!pmd_none(*vmf->pmd))) {
1396 		goto unlock_release;
1397 	} else {
1398 		ret = check_stable_address_space(vma->vm_mm);
1399 		if (ret)
1400 			goto unlock_release;
1401 
1402 		/* Deliver the page fault to userland */
1403 		if (userfaultfd_missing(vma)) {
1404 			spin_unlock(vmf->ptl);
1405 			folio_put(folio);
1406 			pte_free(vma->vm_mm, pgtable);
1407 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
1408 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1409 			return ret;
1410 		}
1411 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1412 		map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
1413 		mm_inc_nr_ptes(vma->vm_mm);
1414 		spin_unlock(vmf->ptl);
1415 	}
1416 
1417 	return 0;
1418 unlock_release:
1419 	spin_unlock(vmf->ptl);
1420 release:
1421 	if (pgtable)
1422 		pte_free(vma->vm_mm, pgtable);
1423 	folio_put(folio);
1424 	return ret;
1425 
1426 }
1427 
1428 vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
1429 {
1430 	struct vm_area_struct *vma = vmf->vma;
1431 	vm_fault_t ret = 0;
1432 	spinlock_t *ptl;
1433 	softleaf_t entry;
1434 	struct page *page;
1435 	struct folio *folio;
1436 
1437 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
1438 		vma_end_read(vma);
1439 		return VM_FAULT_RETRY;
1440 	}
1441 
1442 	ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1443 	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) {
1444 		spin_unlock(ptl);
1445 		return 0;
1446 	}
1447 
1448 	entry = softleaf_from_pmd(vmf->orig_pmd);
1449 	page = softleaf_to_page(entry);
1450 	folio = page_folio(page);
1451 	vmf->page = page;
1452 	vmf->pte = NULL;
1453 	if (folio_trylock(folio)) {
1454 		folio_get(folio);
1455 		spin_unlock(ptl);
1456 		ret = page_pgmap(page)->ops->migrate_to_ram(vmf);
1457 		folio_unlock(folio);
1458 		folio_put(folio);
1459 	} else {
1460 		spin_unlock(ptl);
1461 	}
1462 
1463 	return ret;
1464 }
1465 
1466 /*
1467  * always: directly stall for all thp allocations
1468  * defer: wake kswapd and fail if not immediately available
1469  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
1470  *		  fail if not immediately available
1471  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
1472  *	    available
1473  * never: never stall for any thp allocation
1474  */
1475 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
1476 {
1477 	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
1478 
1479 	/* Always do synchronous compaction */
1480 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
1481 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
1482 
1483 	/* Kick kcompactd and fail quickly */
1484 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
1485 		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
1486 
1487 	/* Synchronous compaction if madvised, otherwise kick kcompactd */
1488 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
1489 		return GFP_TRANSHUGE_LIGHT |
1490 			(vma_madvised ? __GFP_DIRECT_RECLAIM :
1491 					__GFP_KSWAPD_RECLAIM);
1492 
1493 	/* Only do synchronous compaction if madvised */
1494 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
1495 		return GFP_TRANSHUGE_LIGHT |
1496 		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
1497 
1498 	return GFP_TRANSHUGE_LIGHT;
1499 }
1500 
1501 /* Caller must hold page table lock. */
1502 static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
1503 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
1504 		struct folio *zero_folio)
1505 {
1506 	pmd_t entry;
1507 	entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);
1508 	entry = pmd_mkspecial(entry);
1509 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1510 	set_pmd_at(mm, haddr, pmd, entry);
1511 	mm_inc_nr_ptes(mm);
1512 }
1513 
1514 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1515 {
1516 	struct vm_area_struct *vma = vmf->vma;
1517 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1518 	vm_fault_t ret;
1519 
1520 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
1521 		return VM_FAULT_FALLBACK;
1522 	ret = vmf_anon_prepare(vmf);
1523 	if (ret)
1524 		return ret;
1525 	khugepaged_enter_vma(vma, vma->vm_flags);
1526 
1527 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1528 			!mm_forbids_zeropage(vma->vm_mm) &&
1529 			transparent_hugepage_use_zero_page()) {
1530 		pgtable_t pgtable;
1531 		struct folio *zero_folio;
1532 		vm_fault_t ret;
1533 
1534 		pgtable = pte_alloc_one(vma->vm_mm);
1535 		if (unlikely(!pgtable))
1536 			return VM_FAULT_OOM;
1537 		zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
1538 		if (unlikely(!zero_folio)) {
1539 			pte_free(vma->vm_mm, pgtable);
1540 			count_vm_event(THP_FAULT_FALLBACK);
1541 			return VM_FAULT_FALLBACK;
1542 		}
1543 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1544 		ret = 0;
1545 		if (pmd_none(*vmf->pmd)) {
1546 			ret = check_stable_address_space(vma->vm_mm);
1547 			if (ret) {
1548 				spin_unlock(vmf->ptl);
1549 				pte_free(vma->vm_mm, pgtable);
1550 			} else if (userfaultfd_missing(vma)) {
1551 				spin_unlock(vmf->ptl);
1552 				pte_free(vma->vm_mm, pgtable);
1553 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
1554 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1555 			} else {
1556 				set_huge_zero_folio(pgtable, vma->vm_mm, vma,
1557 						   haddr, vmf->pmd, zero_folio);
1558 				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1559 				spin_unlock(vmf->ptl);
1560 			}
1561 		} else {
1562 			spin_unlock(vmf->ptl);
1563 			pte_free(vma->vm_mm, pgtable);
1564 		}
1565 		return ret;
1566 	}
1567 
1568 	return __do_huge_pmd_anonymous_page(vmf);
1569 }
1570 
1571 struct folio_or_pfn {
1572 	union {
1573 		struct folio *folio;
1574 		unsigned long pfn;
1575 	};
1576 	bool is_folio;
1577 };
1578 
1579 static vm_fault_t insert_pmd(struct vm_area_struct *vma, unsigned long addr,
1580 		pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
1581 		bool write)
1582 {
1583 	struct mm_struct *mm = vma->vm_mm;
1584 	pgtable_t pgtable = NULL;
1585 	spinlock_t *ptl;
1586 	pmd_t entry;
1587 
1588 	if (addr < vma->vm_start || addr >= vma->vm_end)
1589 		return VM_FAULT_SIGBUS;
1590 
1591 	if (arch_needs_pgtable_deposit()) {
1592 		pgtable = pte_alloc_one(vma->vm_mm);
1593 		if (!pgtable)
1594 			return VM_FAULT_OOM;
1595 	}
1596 
1597 	ptl = pmd_lock(mm, pmd);
1598 	if (!pmd_none(*pmd)) {
1599 		const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
1600 					  fop.pfn;
1601 
1602 		if (write) {
1603 			if (pmd_pfn(*pmd) != pfn) {
1604 				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1605 				goto out_unlock;
1606 			}
1607 			entry = pmd_mkyoung(*pmd);
1608 			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1609 			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1610 				update_mmu_cache_pmd(vma, addr, pmd);
1611 		}
1612 		goto out_unlock;
1613 	}
1614 
1615 	if (fop.is_folio) {
1616 		entry = folio_mk_pmd(fop.folio, vma->vm_page_prot);
1617 
1618 		if (is_huge_zero_folio(fop.folio)) {
1619 			entry = pmd_mkspecial(entry);
1620 		} else {
1621 			folio_get(fop.folio);
1622 			folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
1623 			add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
1624 		}
1625 	} else {
1626 		entry = pmd_mkhuge(pfn_pmd(fop.pfn, prot));
1627 		entry = pmd_mkspecial(entry);
1628 	}
1629 	if (write) {
1630 		entry = pmd_mkyoung(pmd_mkdirty(entry));
1631 		entry = maybe_pmd_mkwrite(entry, vma);
1632 	}
1633 
1634 	if (pgtable) {
1635 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
1636 		mm_inc_nr_ptes(mm);
1637 		pgtable = NULL;
1638 	}
1639 
1640 	set_pmd_at(mm, addr, pmd, entry);
1641 	update_mmu_cache_pmd(vma, addr, pmd);
1642 
1643 out_unlock:
1644 	spin_unlock(ptl);
1645 	if (pgtable)
1646 		pte_free(mm, pgtable);
1647 	return VM_FAULT_NOPAGE;
1648 }
1649 
1650 /**
1651  * vmf_insert_pfn_pmd - insert a pmd size pfn
1652  * @vmf: Structure describing the fault
1653  * @pfn: pfn to insert
1654  * @write: whether it's a write fault
1655  *
1656  * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1657  *
1658  * Return: vm_fault_t value.
1659  */
1660 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
1661 			      bool write)
1662 {
1663 	unsigned long addr = vmf->address & PMD_MASK;
1664 	struct vm_area_struct *vma = vmf->vma;
1665 	pgprot_t pgprot = vma->vm_page_prot;
1666 	struct folio_or_pfn fop = {
1667 		.pfn = pfn,
1668 	};
1669 
1670 	/*
1671 	 * If we had pmd_special, we could avoid all these restrictions,
1672 	 * but we need to be consistent with PTEs and architectures that
1673 	 * can't support a 'special' bit.
1674 	 */
1675 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1676 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1677 						(VM_PFNMAP|VM_MIXEDMAP));
1678 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1679 
1680 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
1681 
1682 	return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write);
1683 }
1684 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
1685 
1686 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
1687 				bool write)
1688 {
1689 	struct vm_area_struct *vma = vmf->vma;
1690 	unsigned long addr = vmf->address & PMD_MASK;
1691 	struct folio_or_pfn fop = {
1692 		.folio = folio,
1693 		.is_folio = true,
1694 	};
1695 
1696 	if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
1697 		return VM_FAULT_SIGBUS;
1698 
1699 	return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write);
1700 }
1701 EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
1702 
1703 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1704 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
1705 {
1706 	if (likely(vma->vm_flags & VM_WRITE))
1707 		pud = pud_mkwrite(pud);
1708 	return pud;
1709 }
1710 
1711 static vm_fault_t insert_pud(struct vm_area_struct *vma, unsigned long addr,
1712 		pud_t *pud, struct folio_or_pfn fop, pgprot_t prot, bool write)
1713 {
1714 	struct mm_struct *mm = vma->vm_mm;
1715 	spinlock_t *ptl;
1716 	pud_t entry;
1717 
1718 	if (addr < vma->vm_start || addr >= vma->vm_end)
1719 		return VM_FAULT_SIGBUS;
1720 
1721 	ptl = pud_lock(mm, pud);
1722 	if (!pud_none(*pud)) {
1723 		const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
1724 					  fop.pfn;
1725 
1726 		if (write) {
1727 			if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
1728 				goto out_unlock;
1729 			entry = pud_mkyoung(*pud);
1730 			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1731 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1732 				update_mmu_cache_pud(vma, addr, pud);
1733 		}
1734 		goto out_unlock;
1735 	}
1736 
1737 	if (fop.is_folio) {
1738 		entry = folio_mk_pud(fop.folio, vma->vm_page_prot);
1739 
1740 		folio_get(fop.folio);
1741 		folio_add_file_rmap_pud(fop.folio, &fop.folio->page, vma);
1742 		add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR);
1743 	} else {
1744 		entry = pud_mkhuge(pfn_pud(fop.pfn, prot));
1745 		entry = pud_mkspecial(entry);
1746 	}
1747 	if (write) {
1748 		entry = pud_mkyoung(pud_mkdirty(entry));
1749 		entry = maybe_pud_mkwrite(entry, vma);
1750 	}
1751 	set_pud_at(mm, addr, pud, entry);
1752 	update_mmu_cache_pud(vma, addr, pud);
1753 out_unlock:
1754 	spin_unlock(ptl);
1755 	return VM_FAULT_NOPAGE;
1756 }
1757 
1758 /**
1759  * vmf_insert_pfn_pud - insert a pud size pfn
1760  * @vmf: Structure describing the fault
1761  * @pfn: pfn to insert
1762  * @write: whether it's a write fault
1763  *
1764  * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1765  *
1766  * Return: vm_fault_t value.
1767  */
1768 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
1769 			      bool write)
1770 {
1771 	unsigned long addr = vmf->address & PUD_MASK;
1772 	struct vm_area_struct *vma = vmf->vma;
1773 	pgprot_t pgprot = vma->vm_page_prot;
1774 	struct folio_or_pfn fop = {
1775 		.pfn = pfn,
1776 	};
1777 
1778 	/*
1779 	 * If we had pud_special, we could avoid all these restrictions,
1780 	 * but we need to be consistent with PTEs and architectures that
1781 	 * can't support a 'special' bit.
1782 	 */
1783 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1784 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1785 						(VM_PFNMAP|VM_MIXEDMAP));
1786 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1787 
1788 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
1789 
1790 	return insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
1791 }
1792 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1793 
1794 /**
1795  * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry
1796  * @vmf: Structure describing the fault
1797  * @folio: folio to insert
1798  * @write: whether it's a write fault
1799  *
1800  * Return: vm_fault_t value.
1801  */
1802 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
1803 				bool write)
1804 {
1805 	struct vm_area_struct *vma = vmf->vma;
1806 	unsigned long addr = vmf->address & PUD_MASK;
1807 	struct folio_or_pfn fop = {
1808 		.folio = folio,
1809 		.is_folio = true,
1810 	};
1811 
1812 	if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
1813 		return VM_FAULT_SIGBUS;
1814 
1815 	return insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
1816 }
1817 EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
1818 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1819 
1820 /**
1821  * touch_pmd - Mark page table pmd entry as accessed and dirty (for write)
1822  * @vma: The VMA covering @addr
1823  * @addr: The virtual address
1824  * @pmd: pmd pointer into the page table mapping @addr
1825  * @write: Whether it's a write access
1826  *
1827  * Return: whether the pmd entry is changed
1828  */
1829 bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1830 	       pmd_t *pmd, bool write)
1831 {
1832 	pmd_t entry;
1833 
1834 	entry = pmd_mkyoung(*pmd);
1835 	if (write)
1836 		entry = pmd_mkdirty(entry);
1837 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1838 				  pmd, entry, write)) {
1839 		update_mmu_cache_pmd(vma, addr, pmd);
1840 		return true;
1841 	}
1842 
1843 	return false;
1844 }
1845 
1846 static void copy_huge_non_present_pmd(
1847 		struct mm_struct *dst_mm, struct mm_struct *src_mm,
1848 		pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1849 		struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1850 		pmd_t pmd, pgtable_t pgtable)
1851 {
1852 	softleaf_t entry = softleaf_from_pmd(pmd);
1853 	struct folio *src_folio;
1854 
1855 	VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(pmd));
1856 
1857 	if (softleaf_is_migration_write(entry) ||
1858 	    softleaf_is_migration_read_exclusive(entry)) {
1859 		entry = make_readable_migration_entry(swp_offset(entry));
1860 		pmd = swp_entry_to_pmd(entry);
1861 		if (pmd_swp_soft_dirty(*src_pmd))
1862 			pmd = pmd_swp_mksoft_dirty(pmd);
1863 		if (pmd_swp_uffd_wp(*src_pmd))
1864 			pmd = pmd_swp_mkuffd_wp(pmd);
1865 		set_pmd_at(src_mm, addr, src_pmd, pmd);
1866 	} else if (softleaf_is_device_private(entry)) {
1867 		/*
1868 		 * For device private entries, since there are no
1869 		 * read exclusive entries, writable = !readable
1870 		 */
1871 		if (softleaf_is_device_private_write(entry)) {
1872 			entry = make_readable_device_private_entry(swp_offset(entry));
1873 			pmd = swp_entry_to_pmd(entry);
1874 
1875 			if (pmd_swp_soft_dirty(*src_pmd))
1876 				pmd = pmd_swp_mksoft_dirty(pmd);
1877 			if (pmd_swp_uffd_wp(*src_pmd))
1878 				pmd = pmd_swp_mkuffd_wp(pmd);
1879 			set_pmd_at(src_mm, addr, src_pmd, pmd);
1880 		}
1881 
1882 		src_folio = softleaf_to_folio(entry);
1883 		VM_WARN_ON(!folio_test_large(src_folio));
1884 
1885 		folio_get(src_folio);
1886 		/*
1887 		 * folio_try_dup_anon_rmap_pmd does not fail for
1888 		 * device private entries.
1889 		 */
1890 		folio_try_dup_anon_rmap_pmd(src_folio, &src_folio->page,
1891 					    dst_vma, src_vma);
1892 	}
1893 
1894 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1895 	mm_inc_nr_ptes(dst_mm);
1896 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1897 	if (!userfaultfd_wp(dst_vma))
1898 		pmd = pmd_swp_clear_uffd_wp(pmd);
1899 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1900 }
1901 
1902 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1903 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1904 		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1905 {
1906 	spinlock_t *dst_ptl, *src_ptl;
1907 	struct page *src_page;
1908 	struct folio *src_folio;
1909 	pmd_t pmd;
1910 	pgtable_t pgtable = NULL;
1911 	int ret = -ENOMEM;
1912 
1913 	pmd = pmdp_get_lockless(src_pmd);
1914 	if (unlikely(pmd_present(pmd) && pmd_special(pmd) &&
1915 		     !is_huge_zero_pmd(pmd))) {
1916 		dst_ptl = pmd_lock(dst_mm, dst_pmd);
1917 		src_ptl = pmd_lockptr(src_mm, src_pmd);
1918 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1919 		/*
1920 		 * No need to recheck the pmd, it can't change with write
1921 		 * mmap lock held here.
1922 		 *
1923 		 * Meanwhile, making sure it's not a CoW VMA with writable
1924 		 * mapping, otherwise it means either the anon page wrongly
1925 		 * applied special bit, or we made the PRIVATE mapping be
1926 		 * able to wrongly write to the backend MMIO.
1927 		 */
1928 		VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
1929 		goto set_pmd;
1930 	}
1931 
1932 	/* Skip if can be re-fill on fault */
1933 	if (!vma_is_anonymous(dst_vma))
1934 		return 0;
1935 
1936 	pgtable = pte_alloc_one(dst_mm);
1937 	if (unlikely(!pgtable))
1938 		goto out;
1939 
1940 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
1941 	src_ptl = pmd_lockptr(src_mm, src_pmd);
1942 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1943 
1944 	ret = -EAGAIN;
1945 	pmd = *src_pmd;
1946 
1947 	if (unlikely(thp_migration_supported() &&
1948 		     pmd_is_valid_softleaf(pmd))) {
1949 		copy_huge_non_present_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr,
1950 					  dst_vma, src_vma, pmd, pgtable);
1951 		ret = 0;
1952 		goto out_unlock;
1953 	}
1954 
1955 	if (unlikely(!pmd_trans_huge(pmd))) {
1956 		pte_free(dst_mm, pgtable);
1957 		goto out_unlock;
1958 	}
1959 	/*
1960 	 * When page table lock is held, the huge zero pmd should not be
1961 	 * under splitting since we don't split the page itself, only pmd to
1962 	 * a page table.
1963 	 */
1964 	if (is_huge_zero_pmd(pmd)) {
1965 		/*
1966 		 * mm_get_huge_zero_folio() will never allocate a new
1967 		 * folio here, since we already have a zero page to
1968 		 * copy. It just takes a reference.
1969 		 */
1970 		mm_get_huge_zero_folio(dst_mm);
1971 		goto out_zero_page;
1972 	}
1973 
1974 	src_page = pmd_page(pmd);
1975 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1976 	src_folio = page_folio(src_page);
1977 
1978 	folio_get(src_folio);
1979 	if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {
1980 		/* Page maybe pinned: split and retry the fault on PTEs. */
1981 		folio_put(src_folio);
1982 		pte_free(dst_mm, pgtable);
1983 		spin_unlock(src_ptl);
1984 		spin_unlock(dst_ptl);
1985 		__split_huge_pmd(src_vma, src_pmd, addr, false);
1986 		return -EAGAIN;
1987 	}
1988 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1989 out_zero_page:
1990 	mm_inc_nr_ptes(dst_mm);
1991 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1992 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
1993 	if (!userfaultfd_wp(dst_vma))
1994 		pmd = pmd_clear_uffd_wp(pmd);
1995 	pmd = pmd_wrprotect(pmd);
1996 set_pmd:
1997 	pmd = pmd_mkold(pmd);
1998 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1999 
2000 	ret = 0;
2001 out_unlock:
2002 	spin_unlock(src_ptl);
2003 	spin_unlock(dst_ptl);
2004 out:
2005 	return ret;
2006 }
2007 
2008 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2009 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
2010 	       pud_t *pud, bool write)
2011 {
2012 	pud_t _pud;
2013 
2014 	_pud = pud_mkyoung(*pud);
2015 	if (write)
2016 		_pud = pud_mkdirty(_pud);
2017 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
2018 				  pud, _pud, write))
2019 		update_mmu_cache_pud(vma, addr, pud);
2020 }
2021 
2022 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
2023 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
2024 		  struct vm_area_struct *vma)
2025 {
2026 	spinlock_t *dst_ptl, *src_ptl;
2027 	pud_t pud;
2028 	int ret;
2029 
2030 	dst_ptl = pud_lock(dst_mm, dst_pud);
2031 	src_ptl = pud_lockptr(src_mm, src_pud);
2032 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2033 
2034 	ret = -EAGAIN;
2035 	pud = *src_pud;
2036 	if (unlikely(!pud_trans_huge(pud)))
2037 		goto out_unlock;
2038 
2039 	/*
2040 	 * TODO: once we support anonymous pages, use
2041 	 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
2042 	 */
2043 	if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
2044 		pudp_set_wrprotect(src_mm, addr, src_pud);
2045 		pud = pud_wrprotect(pud);
2046 	}
2047 	pud = pud_mkold(pud);
2048 	set_pud_at(dst_mm, addr, dst_pud, pud);
2049 
2050 	ret = 0;
2051 out_unlock:
2052 	spin_unlock(src_ptl);
2053 	spin_unlock(dst_ptl);
2054 	return ret;
2055 }
2056 
2057 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
2058 {
2059 	bool write = vmf->flags & FAULT_FLAG_WRITE;
2060 
2061 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
2062 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
2063 		goto unlock;
2064 
2065 	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
2066 unlock:
2067 	spin_unlock(vmf->ptl);
2068 }
2069 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2070 
2071 bool huge_pmd_set_accessed(struct vm_fault *vmf)
2072 {
2073 	bool write = vmf->flags & FAULT_FLAG_WRITE;
2074 
2075 	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
2076 		return false;
2077 
2078 	return touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
2079 }
2080 
2081 static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
2082 {
2083 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2084 	struct vm_area_struct *vma = vmf->vma;
2085 	struct mmu_notifier_range range;
2086 	struct folio *folio;
2087 	vm_fault_t ret = 0;
2088 
2089 	folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
2090 	if (unlikely(!folio))
2091 		return VM_FAULT_FALLBACK;
2092 
2093 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
2094 				haddr + HPAGE_PMD_SIZE);
2095 	mmu_notifier_invalidate_range_start(&range);
2096 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2097 	if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
2098 		goto release;
2099 	ret = check_stable_address_space(vma->vm_mm);
2100 	if (ret)
2101 		goto release;
2102 	(void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
2103 	map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
2104 	goto unlock;
2105 release:
2106 	folio_put(folio);
2107 unlock:
2108 	spin_unlock(vmf->ptl);
2109 	mmu_notifier_invalidate_range_end(&range);
2110 	return ret;
2111 }
2112 
2113 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
2114 {
2115 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
2116 	struct vm_area_struct *vma = vmf->vma;
2117 	struct folio *folio;
2118 	struct page *page;
2119 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2120 	pmd_t orig_pmd = vmf->orig_pmd;
2121 
2122 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
2123 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
2124 
2125 	if (is_huge_zero_pmd(orig_pmd)) {
2126 		vm_fault_t ret = do_huge_zero_wp_pmd(vmf);
2127 
2128 		if (!(ret & VM_FAULT_FALLBACK))
2129 			return ret;
2130 
2131 		/* Fallback to splitting PMD if THP cannot be allocated */
2132 		goto fallback;
2133 	}
2134 
2135 	spin_lock(vmf->ptl);
2136 
2137 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
2138 		spin_unlock(vmf->ptl);
2139 		return 0;
2140 	}
2141 
2142 	page = pmd_page(orig_pmd);
2143 	folio = page_folio(page);
2144 	VM_BUG_ON_PAGE(!PageHead(page), page);
2145 
2146 	/* Early check when only holding the PT lock. */
2147 	if (PageAnonExclusive(page))
2148 		goto reuse;
2149 
2150 	if (!folio_trylock(folio)) {
2151 		folio_get(folio);
2152 		spin_unlock(vmf->ptl);
2153 		folio_lock(folio);
2154 		spin_lock(vmf->ptl);
2155 		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
2156 			spin_unlock(vmf->ptl);
2157 			folio_unlock(folio);
2158 			folio_put(folio);
2159 			return 0;
2160 		}
2161 		folio_put(folio);
2162 	}
2163 
2164 	/* Recheck after temporarily dropping the PT lock. */
2165 	if (PageAnonExclusive(page)) {
2166 		folio_unlock(folio);
2167 		goto reuse;
2168 	}
2169 
2170 	/*
2171 	 * See do_wp_page(): we can only reuse the folio exclusively if
2172 	 * there are no additional references. Note that we always drain
2173 	 * the LRU cache immediately after adding a THP.
2174 	 */
2175 	if (folio_ref_count(folio) >
2176 			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
2177 		goto unlock_fallback;
2178 	if (folio_test_swapcache(folio))
2179 		folio_free_swap(folio);
2180 	if (folio_ref_count(folio) == 1) {
2181 		pmd_t entry;
2182 
2183 		folio_move_anon_rmap(folio, vma);
2184 		SetPageAnonExclusive(page);
2185 		folio_unlock(folio);
2186 reuse:
2187 		if (unlikely(unshare)) {
2188 			spin_unlock(vmf->ptl);
2189 			return 0;
2190 		}
2191 		entry = pmd_mkyoung(orig_pmd);
2192 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2193 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
2194 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2195 		spin_unlock(vmf->ptl);
2196 		return 0;
2197 	}
2198 
2199 unlock_fallback:
2200 	folio_unlock(folio);
2201 	spin_unlock(vmf->ptl);
2202 fallback:
2203 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false);
2204 	return VM_FAULT_FALLBACK;
2205 }
2206 
2207 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
2208 					   unsigned long addr, pmd_t pmd)
2209 {
2210 	struct page *page;
2211 
2212 	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
2213 		return false;
2214 
2215 	/* Don't touch entries that are not even readable (NUMA hinting). */
2216 	if (pmd_protnone(pmd))
2217 		return false;
2218 
2219 	/* Do we need write faults for softdirty tracking? */
2220 	if (pmd_needs_soft_dirty_wp(vma, pmd))
2221 		return false;
2222 
2223 	/* Do we need write faults for uffd-wp tracking? */
2224 	if (userfaultfd_huge_pmd_wp(vma, pmd))
2225 		return false;
2226 
2227 	if (!(vma->vm_flags & VM_SHARED)) {
2228 		/* See can_change_pte_writable(). */
2229 		page = vm_normal_page_pmd(vma, addr, pmd);
2230 		return page && PageAnon(page) && PageAnonExclusive(page);
2231 	}
2232 
2233 	/* See can_change_pte_writable(). */
2234 	return pmd_dirty(pmd);
2235 }
2236 
2237 /* NUMA hinting page fault entry point for trans huge pmds */
2238 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
2239 {
2240 	struct vm_area_struct *vma = vmf->vma;
2241 	struct folio *folio;
2242 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2243 	int nid = NUMA_NO_NODE;
2244 	int target_nid, last_cpupid;
2245 	pmd_t pmd, old_pmd;
2246 	bool writable = false;
2247 	int flags = 0;
2248 
2249 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2250 	old_pmd = pmdp_get(vmf->pmd);
2251 
2252 	if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
2253 		spin_unlock(vmf->ptl);
2254 		return 0;
2255 	}
2256 
2257 	pmd = pmd_modify(old_pmd, vma->vm_page_prot);
2258 
2259 	/*
2260 	 * Detect now whether the PMD could be writable; this information
2261 	 * is only valid while holding the PT lock.
2262 	 */
2263 	writable = pmd_write(pmd);
2264 	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
2265 	    can_change_pmd_writable(vma, vmf->address, pmd))
2266 		writable = true;
2267 
2268 	folio = vm_normal_folio_pmd(vma, haddr, pmd);
2269 	if (!folio)
2270 		goto out_map;
2271 
2272 	nid = folio_nid(folio);
2273 
2274 	target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
2275 					&last_cpupid);
2276 	if (target_nid == NUMA_NO_NODE)
2277 		goto out_map;
2278 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
2279 		flags |= TNF_MIGRATE_FAIL;
2280 		goto out_map;
2281 	}
2282 	/* The folio is isolated and isolation code holds a folio reference. */
2283 	spin_unlock(vmf->ptl);
2284 	writable = false;
2285 
2286 	if (!migrate_misplaced_folio(folio, target_nid)) {
2287 		flags |= TNF_MIGRATED;
2288 		nid = target_nid;
2289 		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2290 		return 0;
2291 	}
2292 
2293 	flags |= TNF_MIGRATE_FAIL;
2294 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2295 	if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
2296 		spin_unlock(vmf->ptl);
2297 		return 0;
2298 	}
2299 out_map:
2300 	/* Restore the PMD */
2301 	pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
2302 	pmd = pmd_mkyoung(pmd);
2303 	if (writable)
2304 		pmd = pmd_mkwrite(pmd, vma);
2305 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
2306 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2307 	spin_unlock(vmf->ptl);
2308 
2309 	if (nid != NUMA_NO_NODE)
2310 		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2311 	return 0;
2312 }
2313 
2314 /*
2315  * Return true if we do MADV_FREE successfully on entire pmd page.
2316  * Otherwise, return false.
2317  */
2318 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2319 		pmd_t *pmd, unsigned long addr, unsigned long next)
2320 {
2321 	spinlock_t *ptl;
2322 	pmd_t orig_pmd;
2323 	struct folio *folio;
2324 	struct mm_struct *mm = tlb->mm;
2325 	bool ret = false;
2326 
2327 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2328 
2329 	ptl = pmd_trans_huge_lock(pmd, vma);
2330 	if (!ptl)
2331 		goto out_unlocked;
2332 
2333 	orig_pmd = *pmd;
2334 	if (is_huge_zero_pmd(orig_pmd))
2335 		goto out;
2336 
2337 	if (unlikely(!pmd_present(orig_pmd))) {
2338 		VM_BUG_ON(thp_migration_supported() &&
2339 				  !pmd_is_migration_entry(orig_pmd));
2340 		goto out;
2341 	}
2342 
2343 	folio = pmd_folio(orig_pmd);
2344 	/*
2345 	 * If other processes are mapping this folio, we couldn't discard
2346 	 * the folio unless they all do MADV_FREE so let's skip the folio.
2347 	 */
2348 	if (folio_maybe_mapped_shared(folio))
2349 		goto out;
2350 
2351 	if (!folio_trylock(folio))
2352 		goto out;
2353 
2354 	/*
2355 	 * If user want to discard part-pages of THP, split it so MADV_FREE
2356 	 * will deactivate only them.
2357 	 */
2358 	if (next - addr != HPAGE_PMD_SIZE) {
2359 		folio_get(folio);
2360 		spin_unlock(ptl);
2361 		split_folio(folio);
2362 		folio_unlock(folio);
2363 		folio_put(folio);
2364 		goto out_unlocked;
2365 	}
2366 
2367 	if (folio_test_dirty(folio))
2368 		folio_clear_dirty(folio);
2369 	folio_unlock(folio);
2370 
2371 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
2372 		pmdp_invalidate(vma, addr, pmd);
2373 		orig_pmd = pmd_mkold(orig_pmd);
2374 		orig_pmd = pmd_mkclean(orig_pmd);
2375 
2376 		set_pmd_at(mm, addr, pmd, orig_pmd);
2377 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2378 	}
2379 
2380 	folio_mark_lazyfree(folio);
2381 	ret = true;
2382 out:
2383 	spin_unlock(ptl);
2384 out_unlocked:
2385 	return ret;
2386 }
2387 
2388 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
2389 {
2390 	pgtable_t pgtable;
2391 
2392 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2393 	pte_free(mm, pgtable);
2394 	mm_dec_nr_ptes(mm);
2395 }
2396 
2397 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2398 		 pmd_t *pmd, unsigned long addr)
2399 {
2400 	pmd_t orig_pmd;
2401 	spinlock_t *ptl;
2402 
2403 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2404 
2405 	ptl = __pmd_trans_huge_lock(pmd, vma);
2406 	if (!ptl)
2407 		return 0;
2408 	/*
2409 	 * For architectures like ppc64 we look at deposited pgtable
2410 	 * when calling pmdp_huge_get_and_clear. So do the
2411 	 * pgtable_trans_huge_withdraw after finishing pmdp related
2412 	 * operations.
2413 	 */
2414 	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
2415 						tlb->fullmm);
2416 	arch_check_zapped_pmd(vma, orig_pmd);
2417 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2418 	if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
2419 		if (arch_needs_pgtable_deposit())
2420 			zap_deposited_table(tlb->mm, pmd);
2421 		spin_unlock(ptl);
2422 	} else if (is_huge_zero_pmd(orig_pmd)) {
2423 		if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
2424 			zap_deposited_table(tlb->mm, pmd);
2425 		spin_unlock(ptl);
2426 	} else {
2427 		struct folio *folio = NULL;
2428 		int flush_needed = 1;
2429 
2430 		if (pmd_present(orig_pmd)) {
2431 			struct page *page = pmd_page(orig_pmd);
2432 
2433 			folio = page_folio(page);
2434 			folio_remove_rmap_pmd(folio, page, vma);
2435 			WARN_ON_ONCE(folio_mapcount(folio) < 0);
2436 			VM_BUG_ON_PAGE(!PageHead(page), page);
2437 		} else if (pmd_is_valid_softleaf(orig_pmd)) {
2438 			const softleaf_t entry = softleaf_from_pmd(orig_pmd);
2439 
2440 			folio = softleaf_to_folio(entry);
2441 			flush_needed = 0;
2442 
2443 			if (!thp_migration_supported())
2444 				WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
2445 		}
2446 
2447 		if (folio_test_anon(folio)) {
2448 			zap_deposited_table(tlb->mm, pmd);
2449 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
2450 		} else {
2451 			if (arch_needs_pgtable_deposit())
2452 				zap_deposited_table(tlb->mm, pmd);
2453 			add_mm_counter(tlb->mm, mm_counter_file(folio),
2454 				       -HPAGE_PMD_NR);
2455 
2456 			/*
2457 			 * Use flush_needed to indicate whether the PMD entry
2458 			 * is present, instead of checking pmd_present() again.
2459 			 */
2460 			if (flush_needed && pmd_young(orig_pmd) &&
2461 			    likely(vma_has_recency(vma)))
2462 				folio_mark_accessed(folio);
2463 		}
2464 
2465 		if (folio_is_device_private(folio)) {
2466 			folio_remove_rmap_pmd(folio, &folio->page, vma);
2467 			WARN_ON_ONCE(folio_mapcount(folio) < 0);
2468 			folio_put(folio);
2469 		}
2470 
2471 		spin_unlock(ptl);
2472 		if (flush_needed)
2473 			tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
2474 	}
2475 	return 1;
2476 }
2477 
2478 #ifndef pmd_move_must_withdraw
2479 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
2480 					 spinlock_t *old_pmd_ptl,
2481 					 struct vm_area_struct *vma)
2482 {
2483 	/*
2484 	 * With split pmd lock we also need to move preallocated
2485 	 * PTE page table if new_pmd is on different PMD page table.
2486 	 *
2487 	 * We also don't deposit and withdraw tables for file pages.
2488 	 */
2489 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
2490 }
2491 #endif
2492 
2493 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
2494 {
2495 	if (pgtable_supports_soft_dirty()) {
2496 		if (unlikely(pmd_is_migration_entry(pmd)))
2497 			pmd = pmd_swp_mksoft_dirty(pmd);
2498 		else if (pmd_present(pmd))
2499 			pmd = pmd_mksoft_dirty(pmd);
2500 	}
2501 
2502 	return pmd;
2503 }
2504 
2505 static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
2506 {
2507 	if (pmd_none(pmd))
2508 		return pmd;
2509 	if (pmd_present(pmd))
2510 		pmd = pmd_clear_uffd_wp(pmd);
2511 	else
2512 		pmd = pmd_swp_clear_uffd_wp(pmd);
2513 
2514 	return pmd;
2515 }
2516 
2517 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
2518 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
2519 {
2520 	spinlock_t *old_ptl, *new_ptl;
2521 	pmd_t pmd;
2522 	struct mm_struct *mm = vma->vm_mm;
2523 	bool force_flush = false;
2524 
2525 	/*
2526 	 * The destination pmd shouldn't be established, free_pgtables()
2527 	 * should have released it; but move_page_tables() might have already
2528 	 * inserted a page table, if racing against shmem/file collapse.
2529 	 */
2530 	if (!pmd_none(*new_pmd)) {
2531 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
2532 		return false;
2533 	}
2534 
2535 	/*
2536 	 * We don't have to worry about the ordering of src and dst
2537 	 * ptlocks because exclusive mmap_lock prevents deadlock.
2538 	 */
2539 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
2540 	if (old_ptl) {
2541 		new_ptl = pmd_lockptr(mm, new_pmd);
2542 		if (new_ptl != old_ptl)
2543 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2544 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
2545 		if (pmd_present(pmd))
2546 			force_flush = true;
2547 		VM_BUG_ON(!pmd_none(*new_pmd));
2548 
2549 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
2550 			pgtable_t pgtable;
2551 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2552 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
2553 		}
2554 		pmd = move_soft_dirty_pmd(pmd);
2555 		if (vma_has_uffd_without_event_remap(vma))
2556 			pmd = clear_uffd_wp_pmd(pmd);
2557 		set_pmd_at(mm, new_addr, new_pmd, pmd);
2558 		if (force_flush)
2559 			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
2560 		if (new_ptl != old_ptl)
2561 			spin_unlock(new_ptl);
2562 		spin_unlock(old_ptl);
2563 		return true;
2564 	}
2565 	return false;
2566 }
2567 
2568 static void change_non_present_huge_pmd(struct mm_struct *mm,
2569 		unsigned long addr, pmd_t *pmd, bool uffd_wp,
2570 		bool uffd_wp_resolve)
2571 {
2572 	softleaf_t entry = softleaf_from_pmd(*pmd);
2573 	const struct folio *folio = softleaf_to_folio(entry);
2574 	pmd_t newpmd;
2575 
2576 	VM_WARN_ON(!pmd_is_valid_softleaf(*pmd));
2577 	if (softleaf_is_migration_write(entry)) {
2578 		/*
2579 		 * A protection check is difficult so
2580 		 * just be safe and disable write
2581 		 */
2582 		if (folio_test_anon(folio))
2583 			entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2584 		else
2585 			entry = make_readable_migration_entry(swp_offset(entry));
2586 		newpmd = swp_entry_to_pmd(entry);
2587 		if (pmd_swp_soft_dirty(*pmd))
2588 			newpmd = pmd_swp_mksoft_dirty(newpmd);
2589 	} else if (softleaf_is_device_private_write(entry)) {
2590 		entry = make_readable_device_private_entry(swp_offset(entry));
2591 		newpmd = swp_entry_to_pmd(entry);
2592 	} else {
2593 		newpmd = *pmd;
2594 	}
2595 
2596 	if (uffd_wp)
2597 		newpmd = pmd_swp_mkuffd_wp(newpmd);
2598 	else if (uffd_wp_resolve)
2599 		newpmd = pmd_swp_clear_uffd_wp(newpmd);
2600 	if (!pmd_same(*pmd, newpmd))
2601 		set_pmd_at(mm, addr, pmd, newpmd);
2602 }
2603 
2604 /*
2605  * Returns
2606  *  - 0 if PMD could not be locked
2607  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2608  *      or if prot_numa but THP migration is not supported
2609  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
2610  */
2611 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2612 		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2613 		    unsigned long cp_flags)
2614 {
2615 	struct mm_struct *mm = vma->vm_mm;
2616 	spinlock_t *ptl;
2617 	pmd_t oldpmd, entry;
2618 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
2619 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2620 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2621 	int ret = 1;
2622 
2623 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2624 
2625 	if (prot_numa && !thp_migration_supported())
2626 		return 1;
2627 
2628 	ptl = __pmd_trans_huge_lock(pmd, vma);
2629 	if (!ptl)
2630 		return 0;
2631 
2632 	if (thp_migration_supported() && pmd_is_valid_softleaf(*pmd)) {
2633 		change_non_present_huge_pmd(mm, addr, pmd, uffd_wp,
2634 					    uffd_wp_resolve);
2635 		goto unlock;
2636 	}
2637 
2638 	if (prot_numa) {
2639 
2640 		/*
2641 		 * Avoid trapping faults against the zero page. The read-only
2642 		 * data is likely to be read-cached on the local CPU and
2643 		 * local/remote hits to the zero page are not interesting.
2644 		 */
2645 		if (is_huge_zero_pmd(*pmd))
2646 			goto unlock;
2647 
2648 		if (pmd_protnone(*pmd))
2649 			goto unlock;
2650 
2651 		if (!folio_can_map_prot_numa(pmd_folio(*pmd), vma,
2652 					     vma_is_single_threaded_private(vma)))
2653 			goto unlock;
2654 	}
2655 	/*
2656 	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
2657 	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
2658 	 * which is also under mmap_read_lock(mm):
2659 	 *
2660 	 *	CPU0:				CPU1:
2661 	 *				change_huge_pmd(prot_numa=1)
2662 	 *				 pmdp_huge_get_and_clear_notify()
2663 	 * madvise_dontneed()
2664 	 *  zap_pmd_range()
2665 	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
2666 	 *   // skip the pmd
2667 	 *				 set_pmd_at();
2668 	 *				 // pmd is re-established
2669 	 *
2670 	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
2671 	 * which may break userspace.
2672 	 *
2673 	 * pmdp_invalidate_ad() is required to make sure we don't miss
2674 	 * dirty/young flags set by hardware.
2675 	 */
2676 	oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2677 
2678 	entry = pmd_modify(oldpmd, newprot);
2679 	if (uffd_wp)
2680 		entry = pmd_mkuffd_wp(entry);
2681 	else if (uffd_wp_resolve)
2682 		/*
2683 		 * Leave the write bit to be handled by PF interrupt
2684 		 * handler, then things like COW could be properly
2685 		 * handled.
2686 		 */
2687 		entry = pmd_clear_uffd_wp(entry);
2688 
2689 	/* See change_pte_range(). */
2690 	if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2691 	    can_change_pmd_writable(vma, addr, entry))
2692 		entry = pmd_mkwrite(entry, vma);
2693 
2694 	ret = HPAGE_PMD_NR;
2695 	set_pmd_at(mm, addr, pmd, entry);
2696 
2697 	if (huge_pmd_needs_flush(oldpmd, entry))
2698 		tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
2699 unlock:
2700 	spin_unlock(ptl);
2701 	return ret;
2702 }
2703 
2704 /*
2705  * Returns:
2706  *
2707  * - 0: if pud leaf changed from under us
2708  * - 1: if pud can be skipped
2709  * - HPAGE_PUD_NR: if pud was successfully processed
2710  */
2711 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2712 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2713 		    pud_t *pudp, unsigned long addr, pgprot_t newprot,
2714 		    unsigned long cp_flags)
2715 {
2716 	struct mm_struct *mm = vma->vm_mm;
2717 	pud_t oldpud, entry;
2718 	spinlock_t *ptl;
2719 
2720 	tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
2721 
2722 	/* NUMA balancing doesn't apply to dax */
2723 	if (cp_flags & MM_CP_PROT_NUMA)
2724 		return 1;
2725 
2726 	/*
2727 	 * Huge entries on userfault-wp only works with anonymous, while we
2728 	 * don't have anonymous PUDs yet.
2729 	 */
2730 	if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
2731 		return 1;
2732 
2733 	ptl = __pud_trans_huge_lock(pudp, vma);
2734 	if (!ptl)
2735 		return 0;
2736 
2737 	/*
2738 	 * Can't clear PUD or it can race with concurrent zapping.  See
2739 	 * change_huge_pmd().
2740 	 */
2741 	oldpud = pudp_invalidate(vma, addr, pudp);
2742 	entry = pud_modify(oldpud, newprot);
2743 	set_pud_at(mm, addr, pudp, entry);
2744 	tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
2745 
2746 	spin_unlock(ptl);
2747 	return HPAGE_PUD_NR;
2748 }
2749 #endif
2750 
2751 #ifdef CONFIG_USERFAULTFD
2752 /*
2753  * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
2754  * the caller, but it must return after releasing the page_table_lock.
2755  * Just move the page from src_pmd to dst_pmd if possible.
2756  * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
2757  * repeated by the caller, or other errors in case of failure.
2758  */
2759 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2760 			struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2761 			unsigned long dst_addr, unsigned long src_addr)
2762 {
2763 	pmd_t _dst_pmd, src_pmdval;
2764 	struct page *src_page;
2765 	struct folio *src_folio;
2766 	spinlock_t *src_ptl, *dst_ptl;
2767 	pgtable_t src_pgtable;
2768 	struct mmu_notifier_range range;
2769 	int err = 0;
2770 
2771 	src_pmdval = *src_pmd;
2772 	src_ptl = pmd_lockptr(mm, src_pmd);
2773 
2774 	lockdep_assert_held(src_ptl);
2775 	vma_assert_locked(src_vma);
2776 	vma_assert_locked(dst_vma);
2777 
2778 	/* Sanity checks before the operation */
2779 	if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2780 	    WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2781 		spin_unlock(src_ptl);
2782 		return -EINVAL;
2783 	}
2784 
2785 	if (!pmd_trans_huge(src_pmdval)) {
2786 		spin_unlock(src_ptl);
2787 		if (pmd_is_migration_entry(src_pmdval)) {
2788 			pmd_migration_entry_wait(mm, &src_pmdval);
2789 			return -EAGAIN;
2790 		}
2791 		return -ENOENT;
2792 	}
2793 
2794 	src_page = pmd_page(src_pmdval);
2795 
2796 	if (!is_huge_zero_pmd(src_pmdval)) {
2797 		if (unlikely(!PageAnonExclusive(src_page))) {
2798 			spin_unlock(src_ptl);
2799 			return -EBUSY;
2800 		}
2801 
2802 		src_folio = page_folio(src_page);
2803 		folio_get(src_folio);
2804 	} else
2805 		src_folio = NULL;
2806 
2807 	spin_unlock(src_ptl);
2808 
2809 	flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2810 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2811 				src_addr + HPAGE_PMD_SIZE);
2812 	mmu_notifier_invalidate_range_start(&range);
2813 
2814 	if (src_folio)
2815 		folio_lock(src_folio);
2816 
2817 	dst_ptl = pmd_lockptr(mm, dst_pmd);
2818 	double_pt_lock(src_ptl, dst_ptl);
2819 	if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2820 		     !pmd_same(*dst_pmd, dst_pmdval))) {
2821 		err = -EAGAIN;
2822 		goto unlock_ptls;
2823 	}
2824 	if (src_folio) {
2825 		if (folio_maybe_dma_pinned(src_folio) ||
2826 		    !PageAnonExclusive(&src_folio->page)) {
2827 			err = -EBUSY;
2828 			goto unlock_ptls;
2829 		}
2830 
2831 		if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2832 		    WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2833 			err = -EBUSY;
2834 			goto unlock_ptls;
2835 		}
2836 
2837 		src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2838 		/* Folio got pinned from under us. Put it back and fail the move. */
2839 		if (folio_maybe_dma_pinned(src_folio)) {
2840 			set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2841 			err = -EBUSY;
2842 			goto unlock_ptls;
2843 		}
2844 
2845 		folio_move_anon_rmap(src_folio, dst_vma);
2846 		src_folio->index = linear_page_index(dst_vma, dst_addr);
2847 
2848 		_dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
2849 		/* Follow mremap() behavior and treat the entry dirty after the move */
2850 		_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2851 	} else {
2852 		src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2853 		_dst_pmd = move_soft_dirty_pmd(src_pmdval);
2854 		_dst_pmd = clear_uffd_wp_pmd(_dst_pmd);
2855 	}
2856 	set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2857 
2858 	src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2859 	pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2860 unlock_ptls:
2861 	double_pt_unlock(src_ptl, dst_ptl);
2862 	/* unblock rmap walks */
2863 	if (src_folio)
2864 		folio_unlock(src_folio);
2865 	mmu_notifier_invalidate_range_end(&range);
2866 	if (src_folio)
2867 		folio_put(src_folio);
2868 	return err;
2869 }
2870 #endif /* CONFIG_USERFAULTFD */
2871 
2872 /*
2873  * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
2874  *
2875  * Note that if it returns page table lock pointer, this routine returns without
2876  * unlocking page table lock. So callers must unlock it.
2877  */
2878 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2879 {
2880 	spinlock_t *ptl;
2881 
2882 	ptl = pmd_lock(vma->vm_mm, pmd);
2883 	if (likely(pmd_is_huge(*pmd)))
2884 		return ptl;
2885 	spin_unlock(ptl);
2886 	return NULL;
2887 }
2888 
2889 /*
2890  * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
2891  *
2892  * Note that if it returns page table lock pointer, this routine returns without
2893  * unlocking page table lock. So callers must unlock it.
2894  */
2895 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2896 {
2897 	spinlock_t *ptl;
2898 
2899 	ptl = pud_lock(vma->vm_mm, pud);
2900 	if (likely(pud_trans_huge(*pud)))
2901 		return ptl;
2902 	spin_unlock(ptl);
2903 	return NULL;
2904 }
2905 
2906 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2907 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2908 		 pud_t *pud, unsigned long addr)
2909 {
2910 	spinlock_t *ptl;
2911 	pud_t orig_pud;
2912 
2913 	ptl = __pud_trans_huge_lock(pud, vma);
2914 	if (!ptl)
2915 		return 0;
2916 
2917 	orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2918 	arch_check_zapped_pud(vma, orig_pud);
2919 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
2920 	if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
2921 		spin_unlock(ptl);
2922 		/* No zero page support yet */
2923 	} else {
2924 		struct page *page = NULL;
2925 		struct folio *folio;
2926 
2927 		/* No support for anonymous PUD pages or migration yet */
2928 		VM_WARN_ON_ONCE(vma_is_anonymous(vma) ||
2929 				!pud_present(orig_pud));
2930 
2931 		page = pud_page(orig_pud);
2932 		folio = page_folio(page);
2933 		folio_remove_rmap_pud(folio, page, vma);
2934 		add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR);
2935 
2936 		spin_unlock(ptl);
2937 		tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE);
2938 	}
2939 	return 1;
2940 }
2941 
2942 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2943 		unsigned long haddr)
2944 {
2945 	struct folio *folio;
2946 	struct page *page;
2947 	pud_t old_pud;
2948 
2949 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2950 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2951 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2952 	VM_BUG_ON(!pud_trans_huge(*pud));
2953 
2954 	count_vm_event(THP_SPLIT_PUD);
2955 
2956 	old_pud = pudp_huge_clear_flush(vma, haddr, pud);
2957 
2958 	if (!vma_is_dax(vma))
2959 		return;
2960 
2961 	page = pud_page(old_pud);
2962 	folio = page_folio(page);
2963 
2964 	if (!folio_test_dirty(folio) && pud_dirty(old_pud))
2965 		folio_mark_dirty(folio);
2966 	if (!folio_test_referenced(folio) && pud_young(old_pud))
2967 		folio_set_referenced(folio);
2968 	folio_remove_rmap_pud(folio, page, vma);
2969 	folio_put(folio);
2970 	add_mm_counter(vma->vm_mm, mm_counter_file(folio),
2971 		-HPAGE_PUD_NR);
2972 }
2973 
2974 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2975 		unsigned long address)
2976 {
2977 	spinlock_t *ptl;
2978 	struct mmu_notifier_range range;
2979 
2980 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2981 				address & HPAGE_PUD_MASK,
2982 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2983 	mmu_notifier_invalidate_range_start(&range);
2984 	ptl = pud_lock(vma->vm_mm, pud);
2985 	if (unlikely(!pud_trans_huge(*pud)))
2986 		goto out;
2987 	__split_huge_pud_locked(vma, pud, range.start);
2988 
2989 out:
2990 	spin_unlock(ptl);
2991 	mmu_notifier_invalidate_range_end(&range);
2992 }
2993 #else
2994 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2995 		unsigned long address)
2996 {
2997 }
2998 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2999 
3000 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
3001 		unsigned long haddr, pmd_t *pmd)
3002 {
3003 	struct mm_struct *mm = vma->vm_mm;
3004 	pgtable_t pgtable;
3005 	pmd_t _pmd, old_pmd;
3006 	unsigned long addr;
3007 	pte_t *pte;
3008 	int i;
3009 
3010 	/*
3011 	 * Leave pmd empty until pte is filled note that it is fine to delay
3012 	 * notification until mmu_notifier_invalidate_range_end() as we are
3013 	 * replacing a zero pmd write protected page with a zero pte write
3014 	 * protected page.
3015 	 *
3016 	 * See Documentation/mm/mmu_notifier.rst
3017 	 */
3018 	old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
3019 
3020 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
3021 	pmd_populate(mm, &_pmd, pgtable);
3022 
3023 	pte = pte_offset_map(&_pmd, haddr);
3024 	VM_BUG_ON(!pte);
3025 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3026 		pte_t entry;
3027 
3028 		entry = pfn_pte(zero_pfn(addr), vma->vm_page_prot);
3029 		entry = pte_mkspecial(entry);
3030 		if (pmd_uffd_wp(old_pmd))
3031 			entry = pte_mkuffd_wp(entry);
3032 		VM_BUG_ON(!pte_none(ptep_get(pte)));
3033 		set_pte_at(mm, addr, pte, entry);
3034 		pte++;
3035 	}
3036 	pte_unmap(pte - 1);
3037 	smp_wmb(); /* make pte visible before pmd */
3038 	pmd_populate(mm, pmd, pgtable);
3039 }
3040 
3041 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
3042 		unsigned long haddr, bool freeze)
3043 {
3044 	struct mm_struct *mm = vma->vm_mm;
3045 	struct folio *folio;
3046 	struct page *page;
3047 	pgtable_t pgtable;
3048 	pmd_t old_pmd, _pmd;
3049 	bool soft_dirty, uffd_wp = false, young = false, write = false;
3050 	bool anon_exclusive = false, dirty = false;
3051 	unsigned long addr;
3052 	pte_t *pte;
3053 	int i;
3054 
3055 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
3056 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
3057 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
3058 
3059 	VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd));
3060 
3061 	count_vm_event(THP_SPLIT_PMD);
3062 
3063 	if (!vma_is_anonymous(vma)) {
3064 		old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
3065 		/*
3066 		 * We are going to unmap this huge page. So
3067 		 * just go ahead and zap it
3068 		 */
3069 		if (arch_needs_pgtable_deposit())
3070 			zap_deposited_table(mm, pmd);
3071 		if (!vma_is_dax(vma) && vma_is_special_huge(vma))
3072 			return;
3073 		if (unlikely(pmd_is_migration_entry(old_pmd))) {
3074 			const softleaf_t old_entry = softleaf_from_pmd(old_pmd);
3075 
3076 			folio = softleaf_to_folio(old_entry);
3077 		} else if (is_huge_zero_pmd(old_pmd)) {
3078 			return;
3079 		} else {
3080 			page = pmd_page(old_pmd);
3081 			folio = page_folio(page);
3082 			if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
3083 				folio_mark_dirty(folio);
3084 			if (!folio_test_referenced(folio) && pmd_young(old_pmd))
3085 				folio_set_referenced(folio);
3086 			folio_remove_rmap_pmd(folio, page, vma);
3087 			folio_put(folio);
3088 		}
3089 		add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
3090 		return;
3091 	}
3092 
3093 	if (is_huge_zero_pmd(*pmd)) {
3094 		/*
3095 		 * FIXME: Do we want to invalidate secondary mmu by calling
3096 		 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
3097 		 * inside __split_huge_pmd() ?
3098 		 *
3099 		 * We are going from a zero huge page write protected to zero
3100 		 * small page also write protected so it does not seems useful
3101 		 * to invalidate secondary mmu at this time.
3102 		 */
3103 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
3104 	}
3105 
3106 	if (pmd_is_migration_entry(*pmd)) {
3107 		softleaf_t entry;
3108 
3109 		old_pmd = *pmd;
3110 		entry = softleaf_from_pmd(old_pmd);
3111 		page = softleaf_to_page(entry);
3112 		folio = page_folio(page);
3113 
3114 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
3115 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
3116 
3117 		write = softleaf_is_migration_write(entry);
3118 		if (PageAnon(page))
3119 			anon_exclusive = softleaf_is_migration_read_exclusive(entry);
3120 		young = softleaf_is_migration_young(entry);
3121 		dirty = softleaf_is_migration_dirty(entry);
3122 	} else if (pmd_is_device_private_entry(*pmd)) {
3123 		softleaf_t entry;
3124 
3125 		old_pmd = *pmd;
3126 		entry = softleaf_from_pmd(old_pmd);
3127 		page = softleaf_to_page(entry);
3128 		folio = page_folio(page);
3129 
3130 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
3131 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
3132 
3133 		write = softleaf_is_device_private_write(entry);
3134 		anon_exclusive = PageAnonExclusive(page);
3135 
3136 		/*
3137 		 * Device private THP should be treated the same as regular
3138 		 * folios w.r.t anon exclusive handling. See the comments for
3139 		 * folio handling and anon_exclusive below.
3140 		 */
3141 		if (freeze && anon_exclusive &&
3142 		    folio_try_share_anon_rmap_pmd(folio, page))
3143 			freeze = false;
3144 		if (!freeze) {
3145 			rmap_t rmap_flags = RMAP_NONE;
3146 
3147 			folio_ref_add(folio, HPAGE_PMD_NR - 1);
3148 			if (anon_exclusive)
3149 				rmap_flags |= RMAP_EXCLUSIVE;
3150 
3151 			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
3152 						 vma, haddr, rmap_flags);
3153 		}
3154 	} else {
3155 		/*
3156 		 * Up to this point the pmd is present and huge and userland has
3157 		 * the whole access to the hugepage during the split (which
3158 		 * happens in place). If we overwrite the pmd with the not-huge
3159 		 * version pointing to the pte here (which of course we could if
3160 		 * all CPUs were bug free), userland could trigger a small page
3161 		 * size TLB miss on the small sized TLB while the hugepage TLB
3162 		 * entry is still established in the huge TLB. Some CPU doesn't
3163 		 * like that. See
3164 		 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
3165 		 * 383 on page 105. Intel should be safe but is also warns that
3166 		 * it's only safe if the permission and cache attributes of the
3167 		 * two entries loaded in the two TLB is identical (which should
3168 		 * be the case here). But it is generally safer to never allow
3169 		 * small and huge TLB entries for the same virtual address to be
3170 		 * loaded simultaneously. So instead of doing "pmd_populate();
3171 		 * flush_pmd_tlb_range();" we first mark the current pmd
3172 		 * notpresent (atomically because here the pmd_trans_huge must
3173 		 * remain set at all times on the pmd until the split is
3174 		 * complete for this pmd), then we flush the SMP TLB and finally
3175 		 * we write the non-huge version of the pmd entry with
3176 		 * pmd_populate.
3177 		 */
3178 		old_pmd = pmdp_invalidate(vma, haddr, pmd);
3179 		page = pmd_page(old_pmd);
3180 		folio = page_folio(page);
3181 		if (pmd_dirty(old_pmd)) {
3182 			dirty = true;
3183 			folio_set_dirty(folio);
3184 		}
3185 		write = pmd_write(old_pmd);
3186 		young = pmd_young(old_pmd);
3187 		soft_dirty = pmd_soft_dirty(old_pmd);
3188 		uffd_wp = pmd_uffd_wp(old_pmd);
3189 
3190 		VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
3191 		VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3192 
3193 		/*
3194 		 * Without "freeze", we'll simply split the PMD, propagating the
3195 		 * PageAnonExclusive() flag for each PTE by setting it for
3196 		 * each subpage -- no need to (temporarily) clear.
3197 		 *
3198 		 * With "freeze" we want to replace mapped pages by
3199 		 * migration entries right away. This is only possible if we
3200 		 * managed to clear PageAnonExclusive() -- see
3201 		 * set_pmd_migration_entry().
3202 		 *
3203 		 * In case we cannot clear PageAnonExclusive(), split the PMD
3204 		 * only and let try_to_migrate_one() fail later.
3205 		 *
3206 		 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
3207 		 */
3208 		anon_exclusive = PageAnonExclusive(page);
3209 		if (freeze && anon_exclusive &&
3210 		    folio_try_share_anon_rmap_pmd(folio, page))
3211 			freeze = false;
3212 		if (!freeze) {
3213 			rmap_t rmap_flags = RMAP_NONE;
3214 
3215 			folio_ref_add(folio, HPAGE_PMD_NR - 1);
3216 			if (anon_exclusive)
3217 				rmap_flags |= RMAP_EXCLUSIVE;
3218 			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
3219 						 vma, haddr, rmap_flags);
3220 		}
3221 	}
3222 
3223 	/*
3224 	 * Withdraw the table only after we mark the pmd entry invalid.
3225 	 * This's critical for some architectures (Power).
3226 	 */
3227 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
3228 	pmd_populate(mm, &_pmd, pgtable);
3229 
3230 	pte = pte_offset_map(&_pmd, haddr);
3231 	VM_BUG_ON(!pte);
3232 
3233 	/*
3234 	 * Note that NUMA hinting access restrictions are not transferred to
3235 	 * avoid any possibility of altering permissions across VMAs.
3236 	 */
3237 	if (freeze || pmd_is_migration_entry(old_pmd)) {
3238 		pte_t entry;
3239 		swp_entry_t swp_entry;
3240 
3241 		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3242 			if (write)
3243 				swp_entry = make_writable_migration_entry(
3244 							page_to_pfn(page + i));
3245 			else if (anon_exclusive)
3246 				swp_entry = make_readable_exclusive_migration_entry(
3247 							page_to_pfn(page + i));
3248 			else
3249 				swp_entry = make_readable_migration_entry(
3250 							page_to_pfn(page + i));
3251 			if (young)
3252 				swp_entry = make_migration_entry_young(swp_entry);
3253 			if (dirty)
3254 				swp_entry = make_migration_entry_dirty(swp_entry);
3255 			entry = swp_entry_to_pte(swp_entry);
3256 			if (soft_dirty)
3257 				entry = pte_swp_mksoft_dirty(entry);
3258 			if (uffd_wp)
3259 				entry = pte_swp_mkuffd_wp(entry);
3260 			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3261 			set_pte_at(mm, addr, pte + i, entry);
3262 		}
3263 	} else if (pmd_is_device_private_entry(old_pmd)) {
3264 		pte_t entry;
3265 		swp_entry_t swp_entry;
3266 
3267 		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3268 			/*
3269 			 * anon_exclusive was already propagated to the relevant
3270 			 * pages corresponding to the pte entries when freeze
3271 			 * is false.
3272 			 */
3273 			if (write)
3274 				swp_entry = make_writable_device_private_entry(
3275 							page_to_pfn(page + i));
3276 			else
3277 				swp_entry = make_readable_device_private_entry(
3278 							page_to_pfn(page + i));
3279 			/*
3280 			 * Young and dirty bits are not progated via swp_entry
3281 			 */
3282 			entry = swp_entry_to_pte(swp_entry);
3283 			if (soft_dirty)
3284 				entry = pte_swp_mksoft_dirty(entry);
3285 			if (uffd_wp)
3286 				entry = pte_swp_mkuffd_wp(entry);
3287 			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3288 			set_pte_at(mm, addr, pte + i, entry);
3289 		}
3290 	} else {
3291 		pte_t entry;
3292 
3293 		entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
3294 		if (write)
3295 			entry = pte_mkwrite(entry, vma);
3296 		if (!young)
3297 			entry = pte_mkold(entry);
3298 		/* NOTE: this may set soft-dirty too on some archs */
3299 		if (dirty)
3300 			entry = pte_mkdirty(entry);
3301 		if (soft_dirty)
3302 			entry = pte_mksoft_dirty(entry);
3303 		if (uffd_wp)
3304 			entry = pte_mkuffd_wp(entry);
3305 
3306 		for (i = 0; i < HPAGE_PMD_NR; i++)
3307 			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3308 
3309 		set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
3310 	}
3311 	pte_unmap(pte);
3312 
3313 	if (!pmd_is_migration_entry(*pmd))
3314 		folio_remove_rmap_pmd(folio, page, vma);
3315 	if (freeze)
3316 		put_page(page);
3317 
3318 	smp_wmb(); /* make pte visible before pmd */
3319 	pmd_populate(mm, pmd, pgtable);
3320 }
3321 
3322 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
3323 			   pmd_t *pmd, bool freeze)
3324 {
3325 	VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
3326 	if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd))
3327 		__split_huge_pmd_locked(vma, pmd, address, freeze);
3328 }
3329 
3330 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
3331 		unsigned long address, bool freeze)
3332 {
3333 	spinlock_t *ptl;
3334 	struct mmu_notifier_range range;
3335 
3336 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
3337 				address & HPAGE_PMD_MASK,
3338 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
3339 	mmu_notifier_invalidate_range_start(&range);
3340 	ptl = pmd_lock(vma->vm_mm, pmd);
3341 	split_huge_pmd_locked(vma, range.start, pmd, freeze);
3342 	spin_unlock(ptl);
3343 	mmu_notifier_invalidate_range_end(&range);
3344 }
3345 
3346 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
3347 		bool freeze)
3348 {
3349 	pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
3350 
3351 	if (!pmd)
3352 		return;
3353 
3354 	__split_huge_pmd(vma, pmd, address, freeze);
3355 }
3356 
3357 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
3358 {
3359 	/*
3360 	 * If the new address isn't hpage aligned and it could previously
3361 	 * contain an hugepage: check if we need to split an huge pmd.
3362 	 */
3363 	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
3364 	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
3365 			 ALIGN(address, HPAGE_PMD_SIZE)))
3366 		split_huge_pmd_address(vma, address, false);
3367 }
3368 
3369 void vma_adjust_trans_huge(struct vm_area_struct *vma,
3370 			   unsigned long start,
3371 			   unsigned long end,
3372 			   struct vm_area_struct *next)
3373 {
3374 	/* Check if we need to split start first. */
3375 	split_huge_pmd_if_needed(vma, start);
3376 
3377 	/* Check if we need to split end next. */
3378 	split_huge_pmd_if_needed(vma, end);
3379 
3380 	/* If we're incrementing next->vm_start, we might need to split it. */
3381 	if (next)
3382 		split_huge_pmd_if_needed(next, end);
3383 }
3384 
3385 static void unmap_folio(struct folio *folio)
3386 {
3387 	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
3388 		TTU_BATCH_FLUSH;
3389 
3390 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3391 
3392 	if (folio_test_pmd_mappable(folio))
3393 		ttu_flags |= TTU_SPLIT_HUGE_PMD;
3394 
3395 	/*
3396 	 * Anon pages need migration entries to preserve them, but file
3397 	 * pages can simply be left unmapped, then faulted back on demand.
3398 	 * If that is ever changed (perhaps for mlock), update remap_page().
3399 	 */
3400 	if (folio_test_anon(folio))
3401 		try_to_migrate(folio, ttu_flags);
3402 	else
3403 		try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
3404 
3405 	try_to_unmap_flush();
3406 }
3407 
3408 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
3409 					    unsigned long addr, pmd_t *pmdp,
3410 					    struct folio *folio)
3411 {
3412 	struct mm_struct *mm = vma->vm_mm;
3413 	int ref_count, map_count;
3414 	pmd_t orig_pmd = *pmdp;
3415 
3416 	if (pmd_dirty(orig_pmd))
3417 		folio_set_dirty(folio);
3418 	if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
3419 		folio_set_swapbacked(folio);
3420 		return false;
3421 	}
3422 
3423 	orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
3424 
3425 	/*
3426 	 * Syncing against concurrent GUP-fast:
3427 	 * - clear PMD; barrier; read refcount
3428 	 * - inc refcount; barrier; read PMD
3429 	 */
3430 	smp_mb();
3431 
3432 	ref_count = folio_ref_count(folio);
3433 	map_count = folio_mapcount(folio);
3434 
3435 	/*
3436 	 * Order reads for folio refcount and dirty flag
3437 	 * (see comments in __remove_mapping()).
3438 	 */
3439 	smp_rmb();
3440 
3441 	/*
3442 	 * If the folio or its PMD is redirtied at this point, or if there
3443 	 * are unexpected references, we will give up to discard this folio
3444 	 * and remap it.
3445 	 *
3446 	 * The only folio refs must be one from isolation plus the rmap(s).
3447 	 */
3448 	if (pmd_dirty(orig_pmd))
3449 		folio_set_dirty(folio);
3450 	if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
3451 		folio_set_swapbacked(folio);
3452 		set_pmd_at(mm, addr, pmdp, orig_pmd);
3453 		return false;
3454 	}
3455 
3456 	if (ref_count != map_count + 1) {
3457 		set_pmd_at(mm, addr, pmdp, orig_pmd);
3458 		return false;
3459 	}
3460 
3461 	folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
3462 	zap_deposited_table(mm, pmdp);
3463 	add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
3464 	if (vma->vm_flags & VM_LOCKED)
3465 		mlock_drain_local();
3466 	folio_put(folio);
3467 
3468 	return true;
3469 }
3470 
3471 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
3472 			   pmd_t *pmdp, struct folio *folio)
3473 {
3474 	VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
3475 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3476 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3477 	VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
3478 	VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
3479 
3480 	return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
3481 }
3482 
3483 static void remap_page(struct folio *folio, unsigned long nr, int flags)
3484 {
3485 	int i = 0;
3486 
3487 	/* If unmap_folio() uses try_to_migrate() on file, remove this check */
3488 	if (!folio_test_anon(folio))
3489 		return;
3490 	for (;;) {
3491 		remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);
3492 		i += folio_nr_pages(folio);
3493 		if (i >= nr)
3494 			break;
3495 		folio = folio_next(folio);
3496 	}
3497 }
3498 
3499 static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
3500 		struct lruvec *lruvec, struct list_head *list)
3501 {
3502 	VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
3503 	lockdep_assert_held(&lruvec->lru_lock);
3504 
3505 	if (folio_is_device_private(folio))
3506 		return;
3507 
3508 	if (list) {
3509 		/* page reclaim is reclaiming a huge page */
3510 		VM_WARN_ON(folio_test_lru(folio));
3511 		folio_get(new_folio);
3512 		list_add_tail(&new_folio->lru, list);
3513 	} else {
3514 		/* head is still on lru (and we have it frozen) */
3515 		VM_WARN_ON(!folio_test_lru(folio));
3516 		if (folio_test_unevictable(folio))
3517 			new_folio->mlock_count = 0;
3518 		else
3519 			list_add_tail(&new_folio->lru, &folio->lru);
3520 		folio_set_lru(new_folio);
3521 	}
3522 }
3523 
3524 static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
3525 {
3526 	for (; nr_pages; page++, nr_pages--)
3527 		if (PageHWPoison(page))
3528 			return true;
3529 	return false;
3530 }
3531 
3532 /*
3533  * It splits @folio into @new_order folios and copies the @folio metadata to
3534  * all the resulting folios.
3535  */
3536 static void __split_folio_to_order(struct folio *folio, int old_order,
3537 		int new_order)
3538 {
3539 	/* Scan poisoned pages when split a poisoned folio to large folios */
3540 	const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order;
3541 	long new_nr_pages = 1 << new_order;
3542 	long nr_pages = 1 << old_order;
3543 	long i;
3544 
3545 	folio_clear_has_hwpoisoned(folio);
3546 
3547 	/* Check first new_nr_pages since the loop below skips them */
3548 	if (handle_hwpoison &&
3549 	    page_range_has_hwpoisoned(folio_page(folio, 0), new_nr_pages))
3550 		folio_set_has_hwpoisoned(folio);
3551 	/*
3552 	 * Skip the first new_nr_pages, since the new folio from them have all
3553 	 * the flags from the original folio.
3554 	 */
3555 	for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) {
3556 		struct page *new_head = &folio->page + i;
3557 		/*
3558 		 * Careful: new_folio is not a "real" folio before we cleared PageTail.
3559 		 * Don't pass it around before clear_compound_head().
3560 		 */
3561 		struct folio *new_folio = (struct folio *)new_head;
3562 
3563 		VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head);
3564 
3565 		/*
3566 		 * Clone page flags before unfreezing refcount.
3567 		 *
3568 		 * After successful get_page_unless_zero() might follow flags change,
3569 		 * for example lock_page() which set PG_waiters.
3570 		 *
3571 		 * Note that for mapped sub-pages of an anonymous THP,
3572 		 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
3573 		 * the migration entry instead from where remap_page() will restore it.
3574 		 * We can still have PG_anon_exclusive set on effectively unmapped and
3575 		 * unreferenced sub-pages of an anonymous THP: we can simply drop
3576 		 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
3577 		 */
3578 		new_folio->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
3579 		new_folio->flags.f |= (folio->flags.f &
3580 				((1L << PG_referenced) |
3581 				 (1L << PG_swapbacked) |
3582 				 (1L << PG_swapcache) |
3583 				 (1L << PG_mlocked) |
3584 				 (1L << PG_uptodate) |
3585 				 (1L << PG_active) |
3586 				 (1L << PG_workingset) |
3587 				 (1L << PG_locked) |
3588 				 (1L << PG_unevictable) |
3589 #ifdef CONFIG_ARCH_USES_PG_ARCH_2
3590 				 (1L << PG_arch_2) |
3591 #endif
3592 #ifdef CONFIG_ARCH_USES_PG_ARCH_3
3593 				 (1L << PG_arch_3) |
3594 #endif
3595 				 (1L << PG_dirty) |
3596 				 LRU_GEN_MASK | LRU_REFS_MASK));
3597 
3598 		if (handle_hwpoison &&
3599 		    page_range_has_hwpoisoned(new_head, new_nr_pages))
3600 			folio_set_has_hwpoisoned(new_folio);
3601 
3602 		new_folio->mapping = folio->mapping;
3603 		new_folio->index = folio->index + i;
3604 
3605 		if (folio_test_swapcache(folio))
3606 			new_folio->swap.val = folio->swap.val + i;
3607 
3608 		/* Page flags must be visible before we make the page non-compound. */
3609 		smp_wmb();
3610 
3611 		/*
3612 		 * Clear PageTail before unfreezing page refcount.
3613 		 *
3614 		 * After successful get_page_unless_zero() might follow put_page()
3615 		 * which needs correct compound_head().
3616 		 */
3617 		clear_compound_head(new_head);
3618 		if (new_order) {
3619 			prep_compound_page(new_head, new_order);
3620 			folio_set_large_rmappable(new_folio);
3621 		}
3622 
3623 		if (folio_test_young(folio))
3624 			folio_set_young(new_folio);
3625 		if (folio_test_idle(folio))
3626 			folio_set_idle(new_folio);
3627 #ifdef CONFIG_MEMCG
3628 		new_folio->memcg_data = folio->memcg_data;
3629 #endif
3630 
3631 		folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
3632 	}
3633 
3634 	if (new_order)
3635 		folio_set_order(folio, new_order);
3636 	else
3637 		ClearPageCompound(&folio->page);
3638 }
3639 
3640 /**
3641  * __split_unmapped_folio() - splits an unmapped @folio to lower order folios in
3642  * two ways: uniform split or non-uniform split.
3643  * @folio: the to-be-split folio
3644  * @new_order: the smallest order of the after split folios (since buddy
3645  *             allocator like split generates folios with orders from @folio's
3646  *             order - 1 to new_order).
3647  * @split_at: in buddy allocator like split, the folio containing @split_at
3648  *            will be split until its order becomes @new_order.
3649  * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller
3650  * @mapping: @folio->mapping
3651  * @split_type: if the split is uniform or not (buddy allocator like split)
3652  *
3653  *
3654  * 1. uniform split: the given @folio into multiple @new_order small folios,
3655  *    where all small folios have the same order. This is done when
3656  *    split_type is SPLIT_TYPE_UNIFORM.
3657  * 2. buddy allocator like (non-uniform) split: the given @folio is split into
3658  *    half and one of the half (containing the given page) is split into half
3659  *    until the given @folio's order becomes @new_order. This is done when
3660  *    split_type is SPLIT_TYPE_NON_UNIFORM.
3661  *
3662  * The high level flow for these two methods are:
3663  *
3664  * 1. uniform split: @xas is split with no expectation of failure and a single
3665  *    __split_folio_to_order() is called to split the @folio into @new_order
3666  *    along with stats update.
3667  * 2. non-uniform split: folio_order - @new_order calls to
3668  *    __split_folio_to_order() are expected to be made in a for loop to split
3669  *    the @folio to one lower order at a time. The folio containing @split_at
3670  *    is split in each iteration. @xas is split into half in each iteration and
3671  *    can fail. A failed @xas split leaves split folios as is without merging
3672  *    them back.
3673  *
3674  * After splitting, the caller's folio reference will be transferred to the
3675  * folio containing @split_at. The caller needs to unlock and/or free
3676  * after-split folios if necessary.
3677  *
3678  * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
3679  * split but not to @new_order, the caller needs to check)
3680  */
3681 static int __split_unmapped_folio(struct folio *folio, int new_order,
3682 		struct page *split_at, struct xa_state *xas,
3683 		struct address_space *mapping, enum split_type split_type)
3684 {
3685 	const bool is_anon = folio_test_anon(folio);
3686 	int old_order = folio_order(folio);
3687 	int start_order = split_type == SPLIT_TYPE_UNIFORM ? new_order : old_order - 1;
3688 	struct folio *old_folio = folio;
3689 	int split_order;
3690 
3691 	/*
3692 	 * split to new_order one order at a time. For uniform split,
3693 	 * folio is split to new_order directly.
3694 	 */
3695 	for (split_order = start_order;
3696 	     split_order >= new_order;
3697 	     split_order--) {
3698 		int nr_new_folios = 1UL << (old_order - split_order);
3699 
3700 		/* order-1 anonymous folio is not supported */
3701 		if (is_anon && split_order == 1)
3702 			continue;
3703 
3704 		if (mapping) {
3705 			/*
3706 			 * uniform split has xas_split_alloc() called before
3707 			 * irq is disabled to allocate enough memory, whereas
3708 			 * non-uniform split can handle ENOMEM.
3709 			 * Use the to-be-split folio, so that a parallel
3710 			 * folio_try_get() waits on it until xarray is updated
3711 			 * with after-split folios and the original one is
3712 			 * unfrozen.
3713 			 */
3714 			if (split_type == SPLIT_TYPE_UNIFORM) {
3715 				xas_split(xas, old_folio, old_order);
3716 			} else {
3717 				xas_set_order(xas, folio->index, split_order);
3718 				xas_try_split(xas, old_folio, old_order);
3719 				if (xas_error(xas))
3720 					return xas_error(xas);
3721 			}
3722 		}
3723 
3724 		folio_split_memcg_refs(folio, old_order, split_order);
3725 		split_page_owner(&folio->page, old_order, split_order);
3726 		pgalloc_tag_split(folio, old_order, split_order);
3727 		__split_folio_to_order(folio, old_order, split_order);
3728 
3729 		if (is_anon) {
3730 			mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
3731 			mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, nr_new_folios);
3732 		}
3733 		/*
3734 		 * If uniform split, the process is complete.
3735 		 * If non-uniform, continue splitting the folio at @split_at
3736 		 * as long as the next @split_order is >= @new_order.
3737 		 */
3738 		folio = page_folio(split_at);
3739 		old_order = split_order;
3740 	}
3741 
3742 	return 0;
3743 }
3744 
3745 /**
3746  * folio_check_splittable() - check if a folio can be split to a given order
3747  * @folio: folio to be split
3748  * @new_order: the smallest order of the after split folios (since buddy
3749  *             allocator like split generates folios with orders from @folio's
3750  *             order - 1 to new_order).
3751  * @split_type: uniform or non-uniform split
3752  *
3753  * folio_check_splittable() checks if @folio can be split to @new_order using
3754  * @split_type method. The truncated folio check must come first.
3755  *
3756  * Context: folio must be locked.
3757  *
3758  * Return: 0 - @folio can be split to @new_order, otherwise an error number is
3759  * returned.
3760  */
3761 int folio_check_splittable(struct folio *folio, unsigned int new_order,
3762 			   enum split_type split_type)
3763 {
3764 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3765 	/*
3766 	 * Folios that just got truncated cannot get split. Signal to the
3767 	 * caller that there was a race.
3768 	 *
3769 	 * TODO: this will also currently refuse folios without a mapping in the
3770 	 * swapcache (shmem or to-be-anon folios).
3771 	 */
3772 	if (!folio->mapping && !folio_test_anon(folio))
3773 		return -EBUSY;
3774 
3775 	if (folio_test_anon(folio)) {
3776 		/* order-1 is not supported for anonymous THP. */
3777 		if (new_order == 1)
3778 			return -EINVAL;
3779 	} else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
3780 		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3781 		    !mapping_large_folio_support(folio->mapping)) {
3782 			/*
3783 			 * We can always split a folio down to a single page
3784 			 * (new_order == 0) uniformly.
3785 			 *
3786 			 * For any other scenario
3787 			 *   a) uniform split targeting a large folio
3788 			 *      (new_order > 0)
3789 			 *   b) any non-uniform split
3790 			 * we must confirm that the file system supports large
3791 			 * folios.
3792 			 *
3793 			 * Note that we might still have THPs in such
3794 			 * mappings, which is created from khugepaged when
3795 			 * CONFIG_READ_ONLY_THP_FOR_FS is enabled. But in that
3796 			 * case, the mapping does not actually support large
3797 			 * folios properly.
3798 			 */
3799 			return -EINVAL;
3800 		}
3801 	}
3802 
3803 	/*
3804 	 * swapcache folio could only be split to order 0
3805 	 *
3806 	 * non-uniform split creates after-split folios with orders from
3807 	 * folio_order(folio) - 1 to new_order, making it not suitable for any
3808 	 * swapcache folio split. Only uniform split to order-0 can be used
3809 	 * here.
3810 	 */
3811 	if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
3812 		return -EINVAL;
3813 	}
3814 
3815 	if (is_huge_zero_folio(folio))
3816 		return -EINVAL;
3817 
3818 	if (folio_test_writeback(folio))
3819 		return -EBUSY;
3820 
3821 	return 0;
3822 }
3823 
3824 /* Number of folio references from the pagecache or the swapcache. */
3825 static unsigned int folio_cache_ref_count(const struct folio *folio)
3826 {
3827 	if (folio_test_anon(folio) && !folio_test_swapcache(folio))
3828 		return 0;
3829 	return folio_nr_pages(folio);
3830 }
3831 
3832 static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
3833 					     struct page *split_at, struct xa_state *xas,
3834 					     struct address_space *mapping, bool do_lru,
3835 					     struct list_head *list, enum split_type split_type,
3836 					     pgoff_t end, int *nr_shmem_dropped)
3837 {
3838 	struct folio *end_folio = folio_next(folio);
3839 	struct folio *new_folio, *next;
3840 	int old_order = folio_order(folio);
3841 	int ret = 0;
3842 	struct deferred_split *ds_queue;
3843 
3844 	VM_WARN_ON_ONCE(!mapping && end);
3845 	/* Prevent deferred_split_scan() touching ->_refcount */
3846 	ds_queue = folio_split_queue_lock(folio);
3847 	if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
3848 		struct swap_cluster_info *ci = NULL;
3849 		struct lruvec *lruvec;
3850 
3851 		if (old_order > 1) {
3852 			if (!list_empty(&folio->_deferred_list)) {
3853 				ds_queue->split_queue_len--;
3854 				/*
3855 				 * Reinitialize page_deferred_list after removing the
3856 				 * page from the split_queue, otherwise a subsequent
3857 				 * split will see list corruption when checking the
3858 				 * page_deferred_list.
3859 				 */
3860 				list_del_init(&folio->_deferred_list);
3861 			}
3862 			if (folio_test_partially_mapped(folio)) {
3863 				folio_clear_partially_mapped(folio);
3864 				mod_mthp_stat(old_order,
3865 					MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3866 			}
3867 		}
3868 		split_queue_unlock(ds_queue);
3869 		if (mapping) {
3870 			int nr = folio_nr_pages(folio);
3871 
3872 			if (folio_test_pmd_mappable(folio) &&
3873 			    new_order < HPAGE_PMD_ORDER) {
3874 				if (folio_test_swapbacked(folio)) {
3875 					lruvec_stat_mod_folio(folio,
3876 							NR_SHMEM_THPS, -nr);
3877 				} else {
3878 					lruvec_stat_mod_folio(folio,
3879 							NR_FILE_THPS, -nr);
3880 					filemap_nr_thps_dec(mapping);
3881 				}
3882 			}
3883 		}
3884 
3885 		if (folio_test_swapcache(folio)) {
3886 			if (mapping) {
3887 				VM_WARN_ON_ONCE_FOLIO(mapping, folio);
3888 				return -EINVAL;
3889 			}
3890 
3891 			ci = swap_cluster_get_and_lock(folio);
3892 		}
3893 
3894 		/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
3895 		if (do_lru)
3896 			lruvec = folio_lruvec_lock(folio);
3897 
3898 		ret = __split_unmapped_folio(folio, new_order, split_at, xas,
3899 					     mapping, split_type);
3900 
3901 		/*
3902 		 * Unfreeze after-split folios and put them back to the right
3903 		 * list. @folio should be kept frozon until page cache
3904 		 * entries are updated with all the other after-split folios
3905 		 * to prevent others seeing stale page cache entries.
3906 		 * As a result, new_folio starts from the next folio of
3907 		 * @folio.
3908 		 */
3909 		for (new_folio = folio_next(folio); new_folio != end_folio;
3910 		     new_folio = next) {
3911 			unsigned long nr_pages = folio_nr_pages(new_folio);
3912 
3913 			next = folio_next(new_folio);
3914 
3915 			zone_device_private_split_cb(folio, new_folio);
3916 
3917 			folio_ref_unfreeze(new_folio,
3918 					   folio_cache_ref_count(new_folio) + 1);
3919 
3920 			if (do_lru)
3921 				lru_add_split_folio(folio, new_folio, lruvec, list);
3922 
3923 			/*
3924 			 * Anonymous folio with swap cache.
3925 			 * NOTE: shmem in swap cache is not supported yet.
3926 			 */
3927 			if (ci) {
3928 				__swap_cache_replace_folio(ci, folio, new_folio);
3929 				continue;
3930 			}
3931 
3932 			/* Anonymous folio without swap cache */
3933 			if (!mapping)
3934 				continue;
3935 
3936 			/* Add the new folio to the page cache. */
3937 			if (new_folio->index < end) {
3938 				__xa_store(&mapping->i_pages, new_folio->index,
3939 					   new_folio, 0);
3940 				continue;
3941 			}
3942 
3943 			VM_WARN_ON_ONCE(!nr_shmem_dropped);
3944 			/* Drop folio beyond EOF: ->index >= end */
3945 			if (shmem_mapping(mapping) && nr_shmem_dropped)
3946 				*nr_shmem_dropped += nr_pages;
3947 			else if (folio_test_clear_dirty(new_folio))
3948 				folio_account_cleaned(
3949 					new_folio, inode_to_wb(mapping->host));
3950 			__filemap_remove_folio(new_folio, NULL);
3951 			folio_put_refs(new_folio, nr_pages);
3952 		}
3953 
3954 		zone_device_private_split_cb(folio, NULL);
3955 		/*
3956 		 * Unfreeze @folio only after all page cache entries, which
3957 		 * used to point to it, have been updated with new folios.
3958 		 * Otherwise, a parallel folio_try_get() can grab @folio
3959 		 * and its caller can see stale page cache entries.
3960 		 */
3961 		folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
3962 
3963 		if (do_lru)
3964 			unlock_page_lruvec(lruvec);
3965 
3966 		if (ci)
3967 			swap_cluster_unlock(ci);
3968 	} else {
3969 		split_queue_unlock(ds_queue);
3970 		return -EAGAIN;
3971 	}
3972 
3973 	return ret;
3974 }
3975 
3976 /**
3977  * __folio_split() - split a folio at @split_at to a @new_order folio
3978  * @folio: folio to split
3979  * @new_order: the order of the new folio
3980  * @split_at: a page within the new folio
3981  * @lock_at: a page within @folio to be left locked to caller
3982  * @list: after-split folios will be put on it if non NULL
3983  * @split_type: perform uniform split or not (non-uniform split)
3984  *
3985  * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
3986  * It is in charge of checking whether the split is supported or not and
3987  * preparing @folio for __split_unmapped_folio().
3988  *
3989  * After splitting, the after-split folio containing @lock_at remains locked
3990  * and others are unlocked:
3991  * 1. for uniform split, @lock_at points to one of @folio's subpages;
3992  * 2. for buddy allocator like (non-uniform) split, @lock_at points to @folio.
3993  *
3994  * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
3995  * split but not to @new_order, the caller needs to check)
3996  */
3997 static int __folio_split(struct folio *folio, unsigned int new_order,
3998 		struct page *split_at, struct page *lock_at,
3999 		struct list_head *list, enum split_type split_type)
4000 {
4001 	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
4002 	struct folio *end_folio = folio_next(folio);
4003 	bool is_anon = folio_test_anon(folio);
4004 	struct address_space *mapping = NULL;
4005 	struct anon_vma *anon_vma = NULL;
4006 	int old_order = folio_order(folio);
4007 	struct folio *new_folio, *next;
4008 	int nr_shmem_dropped = 0;
4009 	enum ttu_flags ttu_flags = 0;
4010 	int ret;
4011 	pgoff_t end = 0;
4012 
4013 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
4014 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
4015 
4016 	if (folio != page_folio(split_at) || folio != page_folio(lock_at)) {
4017 		ret = -EINVAL;
4018 		goto out;
4019 	}
4020 
4021 	if (new_order >= old_order) {
4022 		ret = -EINVAL;
4023 		goto out;
4024 	}
4025 
4026 	ret = folio_check_splittable(folio, new_order, split_type);
4027 	if (ret) {
4028 		VM_WARN_ONCE(ret == -EINVAL, "Tried to split an unsplittable folio");
4029 		goto out;
4030 	}
4031 
4032 	if (is_anon) {
4033 		/*
4034 		 * The caller does not necessarily hold an mmap_lock that would
4035 		 * prevent the anon_vma disappearing so we first we take a
4036 		 * reference to it and then lock the anon_vma for write. This
4037 		 * is similar to folio_lock_anon_vma_read except the write lock
4038 		 * is taken to serialise against parallel split or collapse
4039 		 * operations.
4040 		 */
4041 		anon_vma = folio_get_anon_vma(folio);
4042 		if (!anon_vma) {
4043 			ret = -EBUSY;
4044 			goto out;
4045 		}
4046 		anon_vma_lock_write(anon_vma);
4047 		mapping = NULL;
4048 	} else {
4049 		unsigned int min_order;
4050 		gfp_t gfp;
4051 
4052 		mapping = folio->mapping;
4053 		min_order = mapping_min_folio_order(folio->mapping);
4054 		if (new_order < min_order) {
4055 			ret = -EINVAL;
4056 			goto out;
4057 		}
4058 
4059 		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
4060 							GFP_RECLAIM_MASK);
4061 
4062 		if (!filemap_release_folio(folio, gfp)) {
4063 			ret = -EBUSY;
4064 			goto out;
4065 		}
4066 
4067 		if (split_type == SPLIT_TYPE_UNIFORM) {
4068 			xas_set_order(&xas, folio->index, new_order);
4069 			xas_split_alloc(&xas, folio, old_order, gfp);
4070 			if (xas_error(&xas)) {
4071 				ret = xas_error(&xas);
4072 				goto out;
4073 			}
4074 		}
4075 
4076 		anon_vma = NULL;
4077 		i_mmap_lock_read(mapping);
4078 
4079 		/*
4080 		 *__split_unmapped_folio() may need to trim off pages beyond
4081 		 * EOF: but on 32-bit, i_size_read() takes an irq-unsafe
4082 		 * seqlock, which cannot be nested inside the page tree lock.
4083 		 * So note end now: i_size itself may be changed at any moment,
4084 		 * but folio lock is good enough to serialize the trimming.
4085 		 */
4086 		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
4087 		if (shmem_mapping(mapping))
4088 			end = shmem_fallocend(mapping->host, end);
4089 	}
4090 
4091 	/*
4092 	 * Racy check if we can split the page, before unmap_folio() will
4093 	 * split PMDs
4094 	 */
4095 	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) {
4096 		ret = -EAGAIN;
4097 		goto out_unlock;
4098 	}
4099 
4100 	unmap_folio(folio);
4101 
4102 	/* block interrupt reentry in xa_lock and spinlock */
4103 	local_irq_disable();
4104 	if (mapping) {
4105 		/*
4106 		 * Check if the folio is present in page cache.
4107 		 * We assume all tail are present too, if folio is there.
4108 		 */
4109 		xas_lock(&xas);
4110 		xas_reset(&xas);
4111 		if (xas_load(&xas) != folio) {
4112 			ret = -EAGAIN;
4113 			goto fail;
4114 		}
4115 	}
4116 
4117 	ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
4118 						true, list, split_type, end, &nr_shmem_dropped);
4119 fail:
4120 	if (mapping)
4121 		xas_unlock(&xas);
4122 
4123 	local_irq_enable();
4124 
4125 	if (nr_shmem_dropped)
4126 		shmem_uncharge(mapping->host, nr_shmem_dropped);
4127 
4128 	if (!ret && is_anon && !folio_is_device_private(folio))
4129 		ttu_flags = TTU_USE_SHARED_ZEROPAGE;
4130 
4131 	remap_page(folio, 1 << old_order, ttu_flags);
4132 
4133 	/*
4134 	 * Unlock all after-split folios except the one containing
4135 	 * @lock_at page. If @folio is not split, it will be kept locked.
4136 	 */
4137 	for (new_folio = folio; new_folio != end_folio; new_folio = next) {
4138 		next = folio_next(new_folio);
4139 		if (new_folio == page_folio(lock_at))
4140 			continue;
4141 
4142 		folio_unlock(new_folio);
4143 		/*
4144 		 * Subpages may be freed if there wasn't any mapping
4145 		 * like if add_to_swap() is running on a lru page that
4146 		 * had its mapping zapped. And freeing these pages
4147 		 * requires taking the lru_lock so we do the put_page
4148 		 * of the tail pages after the split is complete.
4149 		 */
4150 		free_folio_and_swap_cache(new_folio);
4151 	}
4152 
4153 out_unlock:
4154 	if (anon_vma) {
4155 		anon_vma_unlock_write(anon_vma);
4156 		put_anon_vma(anon_vma);
4157 	}
4158 	if (mapping)
4159 		i_mmap_unlock_read(mapping);
4160 out:
4161 	xas_destroy(&xas);
4162 	if (is_pmd_order(old_order))
4163 		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
4164 	count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
4165 	return ret;
4166 }
4167 
4168 /**
4169  * folio_split_unmapped() - split a large anon folio that is already unmapped
4170  * @folio: folio to split
4171  * @new_order: the order of folios after split
4172  *
4173  * This function is a helper for splitting folios that have already been
4174  * unmapped. The use case is that the device or the CPU can refuse to migrate
4175  * THP pages in the middle of migration, due to allocation issues on either
4176  * side.
4177  *
4178  * anon_vma_lock is not required to be held, mmap_read_lock() or
4179  * mmap_write_lock() should be held. @folio is expected to be locked by the
4180  * caller. device-private and non device-private folios are supported along
4181  * with folios that are in the swapcache. @folio should also be unmapped and
4182  * isolated from LRU (if applicable)
4183  *
4184  * Upon return, the folio is not remapped, split folios are not added to LRU,
4185  * free_folio_and_swap_cache() is not called, and new folios remain locked.
4186  *
4187  * Return: 0 on success, -EAGAIN if the folio cannot be split (e.g., due to
4188  *         insufficient reference count or extra pins).
4189  */
4190 int folio_split_unmapped(struct folio *folio, unsigned int new_order)
4191 {
4192 	int ret = 0;
4193 
4194 	VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
4195 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
4196 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
4197 	VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
4198 
4199 	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
4200 		return -EAGAIN;
4201 
4202 	local_irq_disable();
4203 	ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
4204 						NULL, false, NULL, SPLIT_TYPE_UNIFORM,
4205 						0, NULL);
4206 	local_irq_enable();
4207 	return ret;
4208 }
4209 
4210 /*
4211  * This function splits a large folio into smaller folios of order @new_order.
4212  * @page can point to any page of the large folio to split. The split operation
4213  * does not change the position of @page.
4214  *
4215  * Prerequisites:
4216  *
4217  * 1) The caller must hold a reference on the @page's owning folio, also known
4218  *    as the large folio.
4219  *
4220  * 2) The large folio must be locked.
4221  *
4222  * 3) The folio must not be pinned. Any unexpected folio references, including
4223  *    GUP pins, will result in the folio not getting split; instead, the caller
4224  *    will receive an -EAGAIN.
4225  *
4226  * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not
4227  *    supported for non-file-backed folios, because folio->_deferred_list, which
4228  *    is used by partially mapped folios, is stored in subpage 2, but an order-1
4229  *    folio only has subpages 0 and 1. File-backed order-1 folios are supported,
4230  *    since they do not use _deferred_list.
4231  *
4232  * After splitting, the caller's folio reference will be transferred to @page,
4233  * resulting in a raised refcount of @page after this call. The other pages may
4234  * be freed if they are not mapped.
4235  *
4236  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
4237  *
4238  * Pages in @new_order will inherit the mapping, flags, and so on from the
4239  * huge page.
4240  *
4241  * Returns 0 if the huge page was split successfully.
4242  *
4243  * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
4244  * the folio was concurrently removed from the page cache.
4245  *
4246  * Returns -EBUSY when trying to split the huge zeropage, if the folio is
4247  * under writeback, if fs-specific folio metadata cannot currently be
4248  * released, or if some unexpected race happened (e.g., anon VMA disappeared,
4249  * truncation).
4250  *
4251  * Callers should ensure that the order respects the address space mapping
4252  * min-order if one is set for non-anonymous folios.
4253  *
4254  * Returns -EINVAL when trying to split to an order that is incompatible
4255  * with the folio. Splitting to order 0 is compatible with all folios.
4256  */
4257 int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
4258 				     unsigned int new_order)
4259 {
4260 	struct folio *folio = page_folio(page);
4261 
4262 	return __folio_split(folio, new_order, &folio->page, page, list,
4263 			     SPLIT_TYPE_UNIFORM);
4264 }
4265 
4266 /**
4267  * folio_split() - split a folio at @split_at to a @new_order folio
4268  * @folio: folio to split
4269  * @new_order: the order of the new folio
4270  * @split_at: a page within the new folio
4271  * @list: after-split folios are added to @list if not null, otherwise to LRU
4272  *        list
4273  *
4274  * It has the same prerequisites and returns as
4275  * split_huge_page_to_list_to_order().
4276  *
4277  * Split a folio at @split_at to a new_order folio, leave the
4278  * remaining subpages of the original folio as large as possible. For example,
4279  * in the case of splitting an order-9 folio at its third order-3 subpages to
4280  * an order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio.
4281  * After the split, there will be a group of folios with different orders and
4282  * the new folio containing @split_at is marked in bracket:
4283  * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8].
4284  *
4285  * After split, folio is left locked for caller.
4286  *
4287  * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
4288  * split but not to @new_order, the caller needs to check)
4289  */
4290 int folio_split(struct folio *folio, unsigned int new_order,
4291 		struct page *split_at, struct list_head *list)
4292 {
4293 	return __folio_split(folio, new_order, split_at, &folio->page, list,
4294 			     SPLIT_TYPE_NON_UNIFORM);
4295 }
4296 
4297 /**
4298  * min_order_for_split() - get the minimum order @folio can be split to
4299  * @folio: folio to split
4300  *
4301  * min_order_for_split() tells the minimum order @folio can be split to.
4302  * If a file-backed folio is truncated, 0 will be returned. Any subsequent
4303  * split attempt should get -EBUSY from split checking code.
4304  *
4305  * Return: @folio's minimum order for split
4306  */
4307 unsigned int min_order_for_split(struct folio *folio)
4308 {
4309 	if (folio_test_anon(folio))
4310 		return 0;
4311 
4312 	/*
4313 	 * If the folio got truncated, we don't know the previous mapping and
4314 	 * consequently the old min order. But it doesn't matter, as any split
4315 	 * attempt will immediately fail with -EBUSY as the folio cannot get
4316 	 * split until freed.
4317 	 */
4318 	if (!folio->mapping)
4319 		return 0;
4320 
4321 	return mapping_min_folio_order(folio->mapping);
4322 }
4323 
4324 int split_folio_to_list(struct folio *folio, struct list_head *list)
4325 {
4326 	return split_huge_page_to_list_to_order(&folio->page, list, 0);
4327 }
4328 
4329 /*
4330  * __folio_unqueue_deferred_split() is not to be called directly:
4331  * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h
4332  * limits its calls to those folios which may have a _deferred_list for
4333  * queueing THP splits, and that list is (racily observed to be) non-empty.
4334  *
4335  * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is
4336  * zero: because even when split_queue_lock is held, a non-empty _deferred_list
4337  * might be in use on deferred_split_scan()'s unlocked on-stack list.
4338  *
4339  * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
4340  * therefore important to unqueue deferred split before changing folio memcg.
4341  */
4342 bool __folio_unqueue_deferred_split(struct folio *folio)
4343 {
4344 	struct deferred_split *ds_queue;
4345 	unsigned long flags;
4346 	bool unqueued = false;
4347 
4348 	WARN_ON_ONCE(folio_ref_count(folio));
4349 	WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
4350 
4351 	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
4352 	if (!list_empty(&folio->_deferred_list)) {
4353 		ds_queue->split_queue_len--;
4354 		if (folio_test_partially_mapped(folio)) {
4355 			folio_clear_partially_mapped(folio);
4356 			mod_mthp_stat(folio_order(folio),
4357 				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
4358 		}
4359 		list_del_init(&folio->_deferred_list);
4360 		unqueued = true;
4361 	}
4362 	split_queue_unlock_irqrestore(ds_queue, flags);
4363 
4364 	return unqueued;	/* useful for debug warnings */
4365 }
4366 
4367 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
4368 void deferred_split_folio(struct folio *folio, bool partially_mapped)
4369 {
4370 	struct deferred_split *ds_queue;
4371 	unsigned long flags;
4372 
4373 	/*
4374 	 * Order 1 folios have no space for a deferred list, but we also
4375 	 * won't waste much memory by not adding them to the deferred list.
4376 	 */
4377 	if (folio_order(folio) <= 1)
4378 		return;
4379 
4380 	if (!partially_mapped && !split_underused_thp)
4381 		return;
4382 
4383 	/*
4384 	 * Exclude swapcache: originally to avoid a corrupt deferred split
4385 	 * queue. Nowadays that is fully prevented by memcg1_swapout();
4386 	 * but if page reclaim is already handling the same folio, it is
4387 	 * unnecessary to handle it again in the shrinker, so excluding
4388 	 * swapcache here may still be a useful optimization.
4389 	 */
4390 	if (folio_test_swapcache(folio))
4391 		return;
4392 
4393 	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
4394 	if (partially_mapped) {
4395 		if (!folio_test_partially_mapped(folio)) {
4396 			folio_set_partially_mapped(folio);
4397 			if (folio_test_pmd_mappable(folio))
4398 				count_vm_event(THP_DEFERRED_SPLIT_PAGE);
4399 			count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
4400 			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
4401 
4402 		}
4403 	} else {
4404 		/* partially mapped folios cannot become non-partially mapped */
4405 		VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
4406 	}
4407 	if (list_empty(&folio->_deferred_list)) {
4408 		struct mem_cgroup *memcg;
4409 
4410 		memcg = folio_split_queue_memcg(folio, ds_queue);
4411 		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
4412 		ds_queue->split_queue_len++;
4413 		if (memcg)
4414 			set_shrinker_bit(memcg, folio_nid(folio),
4415 					 shrinker_id(deferred_split_shrinker));
4416 	}
4417 	split_queue_unlock_irqrestore(ds_queue, flags);
4418 }
4419 
4420 static unsigned long deferred_split_count(struct shrinker *shrink,
4421 		struct shrink_control *sc)
4422 {
4423 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
4424 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
4425 
4426 #ifdef CONFIG_MEMCG
4427 	if (sc->memcg)
4428 		ds_queue = &sc->memcg->deferred_split_queue;
4429 #endif
4430 	return READ_ONCE(ds_queue->split_queue_len);
4431 }
4432 
4433 static bool thp_underused(struct folio *folio)
4434 {
4435 	int num_zero_pages = 0, num_filled_pages = 0;
4436 	int i;
4437 
4438 	if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
4439 		return false;
4440 
4441 	if (folio_contain_hwpoisoned_page(folio))
4442 		return false;
4443 
4444 	for (i = 0; i < folio_nr_pages(folio); i++) {
4445 		if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
4446 			if (++num_zero_pages > khugepaged_max_ptes_none)
4447 				return true;
4448 		} else {
4449 			/*
4450 			 * Another path for early exit once the number
4451 			 * of non-zero filled pages exceeds threshold.
4452 			 */
4453 			if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
4454 				return false;
4455 		}
4456 	}
4457 	return false;
4458 }
4459 
4460 static unsigned long deferred_split_scan(struct shrinker *shrink,
4461 		struct shrink_control *sc)
4462 {
4463 	struct deferred_split *ds_queue;
4464 	unsigned long flags;
4465 	struct folio *folio, *next;
4466 	int split = 0, i;
4467 	struct folio_batch fbatch;
4468 
4469 	folio_batch_init(&fbatch);
4470 
4471 retry:
4472 	ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
4473 	/* Take pin on all head pages to avoid freeing them under us */
4474 	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
4475 							_deferred_list) {
4476 		if (folio_try_get(folio)) {
4477 			folio_batch_add(&fbatch, folio);
4478 		} else if (folio_test_partially_mapped(folio)) {
4479 			/* We lost race with folio_put() */
4480 			folio_clear_partially_mapped(folio);
4481 			mod_mthp_stat(folio_order(folio),
4482 				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
4483 		}
4484 		list_del_init(&folio->_deferred_list);
4485 		ds_queue->split_queue_len--;
4486 		if (!--sc->nr_to_scan)
4487 			break;
4488 		if (!folio_batch_space(&fbatch))
4489 			break;
4490 	}
4491 	split_queue_unlock_irqrestore(ds_queue, flags);
4492 
4493 	for (i = 0; i < folio_batch_count(&fbatch); i++) {
4494 		bool did_split = false;
4495 		bool underused = false;
4496 		struct deferred_split *fqueue;
4497 
4498 		folio = fbatch.folios[i];
4499 		if (!folio_test_partially_mapped(folio)) {
4500 			/*
4501 			 * See try_to_map_unused_to_zeropage(): we cannot
4502 			 * optimize zero-filled pages after splitting an
4503 			 * mlocked folio.
4504 			 */
4505 			if (folio_test_mlocked(folio))
4506 				goto next;
4507 			underused = thp_underused(folio);
4508 			if (!underused)
4509 				goto next;
4510 		}
4511 		if (!folio_trylock(folio))
4512 			goto next;
4513 		if (!split_folio(folio)) {
4514 			did_split = true;
4515 			if (underused)
4516 				count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
4517 			split++;
4518 		}
4519 		folio_unlock(folio);
4520 next:
4521 		if (did_split || !folio_test_partially_mapped(folio))
4522 			continue;
4523 		/*
4524 		 * Only add back to the queue if folio is partially mapped.
4525 		 * If thp_underused returns false, or if split_folio fails
4526 		 * in the case it was underused, then consider it used and
4527 		 * don't add it back to split_queue.
4528 		 */
4529 		fqueue = folio_split_queue_lock_irqsave(folio, &flags);
4530 		if (list_empty(&folio->_deferred_list)) {
4531 			list_add_tail(&folio->_deferred_list, &fqueue->split_queue);
4532 			fqueue->split_queue_len++;
4533 		}
4534 		split_queue_unlock_irqrestore(fqueue, flags);
4535 	}
4536 	folios_put(&fbatch);
4537 
4538 	if (sc->nr_to_scan && !list_empty(&ds_queue->split_queue)) {
4539 		cond_resched();
4540 		goto retry;
4541 	}
4542 
4543 	/*
4544 	 * Stop shrinker if we didn't split any page, but the queue is empty.
4545 	 * This can happen if pages were freed under us.
4546 	 */
4547 	if (!split && list_empty(&ds_queue->split_queue))
4548 		return SHRINK_STOP;
4549 	return split;
4550 }
4551 
4552 #ifdef CONFIG_MEMCG
4553 void reparent_deferred_split_queue(struct mem_cgroup *memcg)
4554 {
4555 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4556 	struct deferred_split *ds_queue = &memcg->deferred_split_queue;
4557 	struct deferred_split *parent_ds_queue = &parent->deferred_split_queue;
4558 	int nid;
4559 
4560 	spin_lock_irq(&ds_queue->split_queue_lock);
4561 	spin_lock_nested(&parent_ds_queue->split_queue_lock, SINGLE_DEPTH_NESTING);
4562 
4563 	if (!ds_queue->split_queue_len)
4564 		goto unlock;
4565 
4566 	list_splice_tail_init(&ds_queue->split_queue, &parent_ds_queue->split_queue);
4567 	parent_ds_queue->split_queue_len += ds_queue->split_queue_len;
4568 	ds_queue->split_queue_len = 0;
4569 
4570 	for_each_node(nid)
4571 		set_shrinker_bit(parent, nid, shrinker_id(deferred_split_shrinker));
4572 
4573 unlock:
4574 	spin_unlock(&parent_ds_queue->split_queue_lock);
4575 	spin_unlock_irq(&ds_queue->split_queue_lock);
4576 }
4577 #endif
4578 
4579 #ifdef CONFIG_DEBUG_FS
4580 static void split_huge_pages_all(void)
4581 {
4582 	struct zone *zone;
4583 	struct page *page;
4584 	struct folio *folio;
4585 	unsigned long pfn, max_zone_pfn;
4586 	unsigned long total = 0, split = 0;
4587 
4588 	pr_debug("Split all THPs\n");
4589 	for_each_zone(zone) {
4590 		if (!managed_zone(zone))
4591 			continue;
4592 		max_zone_pfn = zone_end_pfn(zone);
4593 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
4594 			int nr_pages;
4595 
4596 			page = pfn_to_online_page(pfn);
4597 			if (!page || PageTail(page))
4598 				continue;
4599 			folio = page_folio(page);
4600 			if (!folio_try_get(folio))
4601 				continue;
4602 
4603 			if (unlikely(page_folio(page) != folio))
4604 				goto next;
4605 
4606 			if (zone != folio_zone(folio))
4607 				goto next;
4608 
4609 			if (!folio_test_large(folio)
4610 				|| folio_test_hugetlb(folio)
4611 				|| !folio_test_lru(folio))
4612 				goto next;
4613 
4614 			total++;
4615 			folio_lock(folio);
4616 			nr_pages = folio_nr_pages(folio);
4617 			if (!split_folio(folio))
4618 				split++;
4619 			pfn += nr_pages - 1;
4620 			folio_unlock(folio);
4621 next:
4622 			folio_put(folio);
4623 			cond_resched();
4624 		}
4625 	}
4626 
4627 	pr_debug("%lu of %lu THP split\n", split, total);
4628 }
4629 
4630 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
4631 {
4632 	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
4633 		    is_vm_hugetlb_page(vma);
4634 }
4635 
4636 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
4637 				unsigned long vaddr_end, unsigned int new_order,
4638 				long in_folio_offset)
4639 {
4640 	int ret = 0;
4641 	struct task_struct *task;
4642 	struct mm_struct *mm;
4643 	unsigned long total = 0, split = 0;
4644 	unsigned long addr;
4645 
4646 	vaddr_start &= PAGE_MASK;
4647 	vaddr_end &= PAGE_MASK;
4648 
4649 	task = find_get_task_by_vpid(pid);
4650 	if (!task) {
4651 		ret = -ESRCH;
4652 		goto out;
4653 	}
4654 
4655 	/* Find the mm_struct */
4656 	mm = get_task_mm(task);
4657 	put_task_struct(task);
4658 
4659 	if (!mm) {
4660 		ret = -EINVAL;
4661 		goto out;
4662 	}
4663 
4664 	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
4665 		 pid, vaddr_start, vaddr_end, new_order, in_folio_offset);
4666 
4667 	mmap_read_lock(mm);
4668 	/*
4669 	 * always increase addr by PAGE_SIZE, since we could have a PTE page
4670 	 * table filled with PTE-mapped THPs, each of which is distinct.
4671 	 */
4672 	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
4673 		struct vm_area_struct *vma = vma_lookup(mm, addr);
4674 		struct folio_walk fw;
4675 		struct folio *folio;
4676 		struct address_space *mapping;
4677 		unsigned int target_order = new_order;
4678 
4679 		if (!vma)
4680 			break;
4681 
4682 		/* skip special VMA and hugetlb VMA */
4683 		if (vma_not_suitable_for_thp_split(vma)) {
4684 			addr = vma->vm_end;
4685 			continue;
4686 		}
4687 
4688 		folio = folio_walk_start(&fw, vma, addr, 0);
4689 		if (!folio)
4690 			continue;
4691 
4692 		if (!is_transparent_hugepage(folio))
4693 			goto next;
4694 
4695 		if (!folio_test_anon(folio)) {
4696 			mapping = folio->mapping;
4697 			target_order = max(new_order,
4698 					   mapping_min_folio_order(mapping));
4699 		}
4700 
4701 		if (target_order >= folio_order(folio))
4702 			goto next;
4703 
4704 		total++;
4705 		/*
4706 		 * For folios with private, split_huge_page_to_list_to_order()
4707 		 * will try to drop it before split and then check if the folio
4708 		 * can be split or not. So skip the check here.
4709 		 */
4710 		if (!folio_test_private(folio) &&
4711 		    folio_expected_ref_count(folio) != folio_ref_count(folio))
4712 			goto next;
4713 
4714 		if (!folio_trylock(folio))
4715 			goto next;
4716 		folio_get(folio);
4717 		folio_walk_end(&fw, vma);
4718 
4719 		if (!folio_test_anon(folio) && folio->mapping != mapping)
4720 			goto unlock;
4721 
4722 		if (in_folio_offset < 0 ||
4723 		    in_folio_offset >= folio_nr_pages(folio)) {
4724 			if (!split_folio_to_order(folio, target_order))
4725 				split++;
4726 		} else {
4727 			struct page *split_at = folio_page(folio,
4728 							   in_folio_offset);
4729 			if (!folio_split(folio, target_order, split_at, NULL))
4730 				split++;
4731 		}
4732 
4733 unlock:
4734 
4735 		folio_unlock(folio);
4736 		folio_put(folio);
4737 
4738 		cond_resched();
4739 		continue;
4740 next:
4741 		folio_walk_end(&fw, vma);
4742 		cond_resched();
4743 	}
4744 	mmap_read_unlock(mm);
4745 	mmput(mm);
4746 
4747 	pr_debug("%lu of %lu THP split\n", split, total);
4748 
4749 out:
4750 	return ret;
4751 }
4752 
4753 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
4754 				pgoff_t off_end, unsigned int new_order,
4755 				long in_folio_offset)
4756 {
4757 	struct file *candidate;
4758 	struct address_space *mapping;
4759 	pgoff_t index;
4760 	int nr_pages = 1;
4761 	unsigned long total = 0, split = 0;
4762 	unsigned int min_order;
4763 	unsigned int target_order;
4764 
4765 	CLASS(filename_kernel, file)(file_path);
4766 	candidate = file_open_name(file, O_RDONLY, 0);
4767 	if (IS_ERR(candidate))
4768 		return -EINVAL;
4769 
4770 	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
4771 		 file_path, off_start, off_end, new_order, in_folio_offset);
4772 
4773 	mapping = candidate->f_mapping;
4774 	min_order = mapping_min_folio_order(mapping);
4775 	target_order = max(new_order, min_order);
4776 
4777 	for (index = off_start; index < off_end; index += nr_pages) {
4778 		struct folio *folio = filemap_get_folio(mapping, index);
4779 
4780 		nr_pages = 1;
4781 		if (IS_ERR(folio))
4782 			continue;
4783 
4784 		if (!folio_test_large(folio))
4785 			goto next;
4786 
4787 		total++;
4788 		nr_pages = folio_nr_pages(folio);
4789 
4790 		if (target_order >= folio_order(folio))
4791 			goto next;
4792 
4793 		if (!folio_trylock(folio))
4794 			goto next;
4795 
4796 		if (folio->mapping != mapping)
4797 			goto unlock;
4798 
4799 		if (in_folio_offset < 0 || in_folio_offset >= nr_pages) {
4800 			if (!split_folio_to_order(folio, target_order))
4801 				split++;
4802 		} else {
4803 			struct page *split_at = folio_page(folio,
4804 							   in_folio_offset);
4805 			if (!folio_split(folio, target_order, split_at, NULL))
4806 				split++;
4807 		}
4808 
4809 unlock:
4810 		folio_unlock(folio);
4811 next:
4812 		folio_put(folio);
4813 		cond_resched();
4814 	}
4815 
4816 	filp_close(candidate, NULL);
4817 	pr_debug("%lu of %lu file-backed THP split\n", split, total);
4818 	return 0;
4819 }
4820 
4821 #define MAX_INPUT_BUF_SZ 255
4822 
4823 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
4824 				size_t count, loff_t *ppops)
4825 {
4826 	static DEFINE_MUTEX(split_debug_mutex);
4827 	ssize_t ret;
4828 	/*
4829 	 * hold pid, start_vaddr, end_vaddr, new_order or
4830 	 * file_path, off_start, off_end, new_order
4831 	 */
4832 	char input_buf[MAX_INPUT_BUF_SZ];
4833 	int pid;
4834 	unsigned long vaddr_start, vaddr_end;
4835 	unsigned int new_order = 0;
4836 	long in_folio_offset = -1;
4837 
4838 	ret = mutex_lock_interruptible(&split_debug_mutex);
4839 	if (ret)
4840 		return ret;
4841 
4842 	ret = -EFAULT;
4843 
4844 	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
4845 	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
4846 		goto out;
4847 
4848 	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
4849 
4850 	if (input_buf[0] == '/') {
4851 		char *tok;
4852 		char *tok_buf = input_buf;
4853 		char file_path[MAX_INPUT_BUF_SZ];
4854 		pgoff_t off_start = 0, off_end = 0;
4855 		size_t input_len = strlen(input_buf);
4856 
4857 		tok = strsep(&tok_buf, ",");
4858 		if (tok && tok_buf) {
4859 			strscpy(file_path, tok);
4860 		} else {
4861 			ret = -EINVAL;
4862 			goto out;
4863 		}
4864 
4865 		ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end,
4866 				&new_order, &in_folio_offset);
4867 		if (ret != 2 && ret != 3 && ret != 4) {
4868 			ret = -EINVAL;
4869 			goto out;
4870 		}
4871 		ret = split_huge_pages_in_file(file_path, off_start, off_end,
4872 				new_order, in_folio_offset);
4873 		if (!ret)
4874 			ret = input_len;
4875 
4876 		goto out;
4877 	}
4878 
4879 	ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start,
4880 			&vaddr_end, &new_order, &in_folio_offset);
4881 	if (ret == 1 && pid == 1) {
4882 		split_huge_pages_all();
4883 		ret = strlen(input_buf);
4884 		goto out;
4885 	} else if (ret != 3 && ret != 4 && ret != 5) {
4886 		ret = -EINVAL;
4887 		goto out;
4888 	}
4889 
4890 	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order,
4891 			in_folio_offset);
4892 	if (!ret)
4893 		ret = strlen(input_buf);
4894 out:
4895 	mutex_unlock(&split_debug_mutex);
4896 	return ret;
4897 
4898 }
4899 
4900 static const struct file_operations split_huge_pages_fops = {
4901 	.owner	 = THIS_MODULE,
4902 	.write	 = split_huge_pages_write,
4903 };
4904 
4905 static int __init split_huge_pages_debugfs(void)
4906 {
4907 	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
4908 			    &split_huge_pages_fops);
4909 	return 0;
4910 }
4911 late_initcall(split_huge_pages_debugfs);
4912 #endif
4913 
4914 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
4915 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
4916 		struct page *page)
4917 {
4918 	struct folio *folio = page_folio(page);
4919 	struct vm_area_struct *vma = pvmw->vma;
4920 	struct mm_struct *mm = vma->vm_mm;
4921 	unsigned long address = pvmw->address;
4922 	bool anon_exclusive;
4923 	pmd_t pmdval;
4924 	swp_entry_t entry;
4925 	pmd_t pmdswp;
4926 
4927 	if (!(pvmw->pmd && !pvmw->pte))
4928 		return 0;
4929 
4930 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
4931 	if (unlikely(!pmd_present(*pvmw->pmd)))
4932 		pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd);
4933 	else
4934 		pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
4935 
4936 	/* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
4937 	anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
4938 	if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
4939 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
4940 		return -EBUSY;
4941 	}
4942 
4943 	if (pmd_dirty(pmdval))
4944 		folio_mark_dirty(folio);
4945 	if (pmd_write(pmdval))
4946 		entry = make_writable_migration_entry(page_to_pfn(page));
4947 	else if (anon_exclusive)
4948 		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
4949 	else
4950 		entry = make_readable_migration_entry(page_to_pfn(page));
4951 	if (pmd_young(pmdval))
4952 		entry = make_migration_entry_young(entry);
4953 	if (pmd_dirty(pmdval))
4954 		entry = make_migration_entry_dirty(entry);
4955 	pmdswp = swp_entry_to_pmd(entry);
4956 	if (pmd_soft_dirty(pmdval))
4957 		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
4958 	if (pmd_uffd_wp(pmdval))
4959 		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
4960 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
4961 	folio_remove_rmap_pmd(folio, page, vma);
4962 	folio_put(folio);
4963 	trace_set_migration_pmd(address, pmd_val(pmdswp));
4964 
4965 	return 0;
4966 }
4967 
4968 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
4969 {
4970 	struct folio *folio = page_folio(new);
4971 	struct vm_area_struct *vma = pvmw->vma;
4972 	struct mm_struct *mm = vma->vm_mm;
4973 	unsigned long address = pvmw->address;
4974 	unsigned long haddr = address & HPAGE_PMD_MASK;
4975 	pmd_t pmde;
4976 	softleaf_t entry;
4977 
4978 	if (!(pvmw->pmd && !pvmw->pte))
4979 		return;
4980 
4981 	entry = softleaf_from_pmd(*pvmw->pmd);
4982 	folio_get(folio);
4983 	pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
4984 
4985 	if (pmd_swp_soft_dirty(*pvmw->pmd))
4986 		pmde = pmd_mksoft_dirty(pmde);
4987 	if (softleaf_is_migration_write(entry))
4988 		pmde = pmd_mkwrite(pmde, vma);
4989 	if (pmd_swp_uffd_wp(*pvmw->pmd))
4990 		pmde = pmd_mkuffd_wp(pmde);
4991 	if (!softleaf_is_migration_young(entry))
4992 		pmde = pmd_mkold(pmde);
4993 	/* NOTE: this may contain setting soft-dirty on some archs */
4994 	if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
4995 		pmde = pmd_mkdirty(pmde);
4996 
4997 	if (folio_is_device_private(folio)) {
4998 		swp_entry_t entry;
4999 
5000 		if (pmd_write(pmde))
5001 			entry = make_writable_device_private_entry(
5002 							page_to_pfn(new));
5003 		else
5004 			entry = make_readable_device_private_entry(
5005 							page_to_pfn(new));
5006 		pmde = swp_entry_to_pmd(entry);
5007 
5008 		if (pmd_swp_soft_dirty(*pvmw->pmd))
5009 			pmde = pmd_swp_mksoft_dirty(pmde);
5010 		if (pmd_swp_uffd_wp(*pvmw->pmd))
5011 			pmde = pmd_swp_mkuffd_wp(pmde);
5012 	}
5013 
5014 	if (folio_test_anon(folio)) {
5015 		rmap_t rmap_flags = RMAP_NONE;
5016 
5017 		if (!softleaf_is_migration_read(entry))
5018 			rmap_flags |= RMAP_EXCLUSIVE;
5019 
5020 		folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
5021 	} else {
5022 		folio_add_file_rmap_pmd(folio, new, vma);
5023 	}
5024 	VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
5025 	set_pmd_at(mm, haddr, pvmw->pmd, pmde);
5026 
5027 	/* No need to invalidate - it was non-present before */
5028 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
5029 	trace_remove_migration_pmd(address, pmd_val(pmde));
5030 }
5031 #endif
5032