1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2009 Red Hat, Inc.
4 */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/numa_balancing.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/swapops.h>
20 #include <linux/backing-dev.h>
21 #include <linux/dax.h>
22 #include <linux/mm_types.h>
23 #include <linux/khugepaged.h>
24 #include <linux/freezer.h>
25 #include <linux/mman.h>
26 #include <linux/memremap.h>
27 #include <linux/pagemap.h>
28 #include <linux/debugfs.h>
29 #include <linux/migrate.h>
30 #include <linux/hashtable.h>
31 #include <linux/userfaultfd_k.h>
32 #include <linux/page_idle.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/oom.h>
35 #include <linux/numa.h>
36 #include <linux/page_owner.h>
37 #include <linux/sched/sysctl.h>
38 #include <linux/memory-tiers.h>
39 #include <linux/compat.h>
40 #include <linux/pgalloc.h>
41 #include <linux/pgalloc_tag.h>
42 #include <linux/pagewalk.h>
43
44 #include <asm/tlb.h>
45 #include "internal.h"
46 #include "swap.h"
47
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/thp.h>
50
51 /*
52 * By default, transparent hugepage support is disabled in order to avoid
53 * risking an increased memory footprint for applications that are not
54 * guaranteed to benefit from it. When transparent hugepage support is
55 * enabled, it is for all mappings, and khugepaged scans all mappings.
56 * Defrag is invoked by khugepaged hugepage allocations and by page faults
57 * for all hugepage allocations.
58 */
59 unsigned long transparent_hugepage_flags __read_mostly =
60 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
61 (1<<TRANSPARENT_HUGEPAGE_FLAG)|
62 #endif
63 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
64 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
65 #endif
66 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
67 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
68 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
69
70 static struct shrinker *deferred_split_shrinker;
71 static unsigned long deferred_split_count(struct shrinker *shrink,
72 struct shrink_control *sc);
73 static unsigned long deferred_split_scan(struct shrinker *shrink,
74 struct shrink_control *sc);
75 static bool split_underused_thp = true;
76
77 static atomic_t huge_zero_refcount;
78 struct folio *huge_zero_folio __read_mostly;
79 unsigned long huge_zero_pfn __read_mostly = ~0UL;
80 unsigned long huge_anon_orders_always __read_mostly;
81 unsigned long huge_anon_orders_madvise __read_mostly;
82 unsigned long huge_anon_orders_inherit __read_mostly;
83 static bool anon_orders_configured __initdata;
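/*
 * Note on the globals above (a summary of how they are used below):
 * huge_zero_refcount/huge_zero_folio/huge_zero_pfn implement a lazily
 * allocated, shrinker-reclaimable huge zero folio, while the three
 * huge_anon_orders_* bitmaps record, per anonymous THP order (bit N ==
 * order N), whether that size is enabled "always", on "madvise", or
 * "inherit"s the global setting, via sysfs or the thp_anon= parameter.
 */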
84
85 static inline bool file_thp_enabled(struct vm_area_struct *vma)
86 {
87 struct inode *inode;
88
89 if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
90 return false;
91
92 if (!vma->vm_file)
93 return false;
94
95 inode = file_inode(vma->vm_file);
96
97 if (IS_ANON_FILE(inode))
98 return false;
99
100 return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
101 }
102
103 /* If this returns true, we are unable to access the VMA's folios. */
104 static bool vma_is_special_huge(const struct vm_area_struct *vma)
105 {
106 if (vma_is_dax(vma))
107 return false;
108 return vma_test_any(vma, VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT);
109 }
110
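/*
 * Filter @orders down to the orders this VMA can actually use. @orders is
 * a bitmask with bit N set when an order-N THP is being asked about; the
 * return value is the subset of those bits still allowed after the
 * per-VMA checks below (0 means no THP order is usable here).
 */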
111 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
112 vm_flags_t vm_flags,
113 enum tva_type type,
114 unsigned long orders)
115 {
116 const bool smaps = type == TVA_SMAPS;
117 const bool in_pf = type == TVA_PAGEFAULT;
118 const bool forced_collapse = type == TVA_FORCED_COLLAPSE;
119 unsigned long supported_orders;
120
121 /* Check the intersection of requested and supported orders. */
122 if (vma_is_anonymous(vma))
123 supported_orders = THP_ORDERS_ALL_ANON;
124 else if (vma_is_dax(vma) || vma_is_special_huge(vma))
125 supported_orders = THP_ORDERS_ALL_SPECIAL_DAX;
126 else
127 supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
128
129 orders &= supported_orders;
130 if (!orders)
131 return 0;
132
133 if (!vma->vm_mm) /* vdso */
134 return 0;
135
136 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags, forced_collapse))
137 return 0;
138
139 /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
140 if (vma_is_dax(vma))
141 return in_pf ? orders : 0;
142
143 /*
144 * khugepaged special VMA and hugetlb VMA.
145 * Must be checked after dax since some dax mappings may have
146 * VM_MIXEDMAP set.
147 */
148 if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
149 return 0;
150
151 /*
152 * Check alignment for file vma and size for both file and anon vma by
153 * filtering out the unsuitable orders.
154 *
155 * Skip the check for page fault. Huge fault does the check in fault
156 * handlers.
157 */
158 if (!in_pf) {
159 int order = highest_order(orders);
160 unsigned long addr;
161
162 while (orders) {
163 addr = vma->vm_end - (PAGE_SIZE << order);
164 if (thp_vma_suitable_order(vma, addr, order))
165 break;
166 order = next_order(&orders, order);
167 }
168
169 if (!orders)
170 return 0;
171 }
172
173 /*
174 * Enabled via shmem mount options or sysfs settings.
175 * Must be done before hugepage flags check since shmem has its
176 * own flags.
177 */
178 if (!in_pf && shmem_file(vma->vm_file))
179 return orders & shmem_allowable_huge_orders(file_inode(vma->vm_file),
180 vma, vma->vm_pgoff, 0,
181 forced_collapse);
182
183 if (!vma_is_anonymous(vma)) {
184 /*
185 * Enforce THP collapse requirements as necessary. Anonymous vmas
186 * were already handled in thp_vma_allowable_orders().
187 */
188 if (!forced_collapse &&
189 (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
190 !hugepage_global_always())))
191 return 0;
192
193 /*
194 * Trust that ->huge_fault() handlers know what they are doing
195 * in fault path.
196 */
197 if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
198 return orders;
199 /* Only regular file is valid in collapse path */
200 if (((!in_pf || smaps)) && file_thp_enabled(vma))
201 return orders;
202 return 0;
203 }
204
205 if (vma_is_temporary_stack(vma))
206 return 0;
207
208 /*
209 * THPeligible bit of smaps should show 1 for proper VMAs even
210 * though anon_vma is not initialized yet.
211 *
212 * Allow page fault since anon_vma may be not initialized until
213 * the first page fault.
214 */
215 if (!vma->anon_vma)
216 return (smaps || in_pf) ? orders : 0;
217
218 return orders;
219 }
220
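/*
 * Take a reference on the global huge zero folio, allocating it on first
 * use. A successful allocation is published with cmpxchg() and starts
 * with a refcount of 2: one for the caller and one that is only dropped
 * by the shrinker (see shrink_huge_zero_folio_scan() below).
 */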
221 static bool get_huge_zero_folio(void)
222 {
223 struct folio *zero_folio;
224 retry:
225 if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
226 return true;
227
228 zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO | __GFP_ZEROTAGS) &
229 ~__GFP_MOVABLE,
230 HPAGE_PMD_ORDER);
231 if (!zero_folio) {
232 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
233 return false;
234 }
235 /* Ensure zero folio won't have large_rmappable flag set. */
236 folio_clear_large_rmappable(zero_folio);
237 preempt_disable();
238 if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
239 preempt_enable();
240 folio_put(zero_folio);
241 goto retry;
242 }
243 WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
244
245 /* We take an additional reference here. It will be put back by the shrinker. */
246 atomic_set(&huge_zero_refcount, 2);
247 preempt_enable();
248 count_vm_event(THP_ZERO_PAGE_ALLOC);
249 return true;
250 }
251
252 static void put_huge_zero_folio(void)
253 {
254 /*
255 * Counter should never go to zero here. Only shrinker can put
256 * last reference.
257 */
258 BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
259 }
260
261 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
262 {
263 if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
264 return huge_zero_folio;
265
266 if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
267 return READ_ONCE(huge_zero_folio);
268
269 if (!get_huge_zero_folio())
270 return NULL;
271
272 if (mm_flags_test_and_set(MMF_HUGE_ZERO_FOLIO, mm))
273 put_huge_zero_folio();
274
275 return READ_ONCE(huge_zero_folio);
276 }
277
278 void mm_put_huge_zero_folio(struct mm_struct *mm)
279 {
280 if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
281 return;
282
283 if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
284 put_huge_zero_folio();
285 }
286
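/*
 * Shrinker callbacks for the huge zero folio: "count" reports HPAGE_PMD_NR
 * reclaimable pages only while the shrinker's own reference is the last
 * one left, and "scan" then drops that reference and frees the folio.
 */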
287 static unsigned long shrink_huge_zero_folio_count(struct shrinker *shrink,
288 struct shrink_control *sc)
289 {
290 /* we can free the zero folio only if the last reference remains */
291 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
292 }
293
294 static unsigned long shrink_huge_zero_folio_scan(struct shrinker *shrink,
295 struct shrink_control *sc)
296 {
297 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
298 struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
299 BUG_ON(zero_folio == NULL);
300 WRITE_ONCE(huge_zero_pfn, ~0UL);
301 folio_put(zero_folio);
302 return HPAGE_PMD_NR;
303 }
304
305 return 0;
306 }
307
308 static struct shrinker *huge_zero_folio_shrinker;
309
310 #ifdef CONFIG_SYSFS
311 static ssize_t enabled_show(struct kobject *kobj,
312 struct kobj_attribute *attr, char *buf)
313 {
314 const char *output;
315
316 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
317 output = "[always] madvise never";
318 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
319 &transparent_hugepage_flags))
320 output = "always [madvise] never";
321 else
322 output = "always madvise [never]";
323
324 return sysfs_emit(buf, "%s\n", output);
325 }
326
327 enum anon_enabled_mode {
328 ANON_ENABLED_ALWAYS = 0,
329 ANON_ENABLED_INHERIT = 1,
330 ANON_ENABLED_MADVISE = 2,
331 ANON_ENABLED_NEVER = 3,
332 };
333
334 static const char * const anon_enabled_mode_strings[] = {
335 [ANON_ENABLED_ALWAYS] = "always",
336 [ANON_ENABLED_INHERIT] = "inherit",
337 [ANON_ENABLED_MADVISE] = "madvise",
338 [ANON_ENABLED_NEVER] = "never",
339 };
340
341 enum global_enabled_mode {
342 GLOBAL_ENABLED_ALWAYS = 0,
343 GLOBAL_ENABLED_MADVISE = 1,
344 GLOBAL_ENABLED_NEVER = 2,
345 };
346
347 static const char * const global_enabled_mode_strings[] = {
348 [GLOBAL_ENABLED_ALWAYS] = "always",
349 [GLOBAL_ENABLED_MADVISE] = "madvise",
350 [GLOBAL_ENABLED_NEVER] = "never",
351 };
352
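/*
 * Flip the global enabled mode so that at most one of the "always" /
 * "madvise" flags is set ("never" clears both). Returns true when any
 * flag actually changed, so the caller knows whether khugepaged needs to
 * be started or stopped.
 */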
353 static bool set_global_enabled_mode(enum global_enabled_mode mode)
354 {
355 static const unsigned long thp_flags[] = {
356 TRANSPARENT_HUGEPAGE_FLAG,
357 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
358 };
359 enum global_enabled_mode m;
360 bool changed = false;
361
362 for (m = 0; m < ARRAY_SIZE(thp_flags); m++) {
363 if (m == mode)
364 changed |= !test_and_set_bit(thp_flags[m],
365 &transparent_hugepage_flags);
366 else
367 changed |= test_and_clear_bit(thp_flags[m],
368 &transparent_hugepage_flags);
369 }
370
371 return changed;
372 }
373
374 static ssize_t enabled_store(struct kobject *kobj,
375 struct kobj_attribute *attr,
376 const char *buf, size_t count)
377 {
378 int mode;
379
380 mode = sysfs_match_string(global_enabled_mode_strings, buf);
381 if (mode < 0)
382 return -EINVAL;
383
384 if (set_global_enabled_mode(mode)) {
385 int err = start_stop_khugepaged();
386
387 if (err)
388 return err;
389 } else {
390 /*
391 * Recalculate watermarks even when the mode didn't
392 * change, as the previous code always called
393 * start_stop_khugepaged() which does this internally.
394 */
395 set_recommended_min_free_kbytes();
396 }
397 return count;
398 }
399
400 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
401
402 ssize_t single_hugepage_flag_show(struct kobject *kobj,
403 struct kobj_attribute *attr, char *buf,
404 enum transparent_hugepage_flag flag)
405 {
406 return sysfs_emit(buf, "%d\n",
407 !!test_bit(flag, &transparent_hugepage_flags));
408 }
409
410 ssize_t single_hugepage_flag_store(struct kobject *kobj,
411 struct kobj_attribute *attr,
412 const char *buf, size_t count,
413 enum transparent_hugepage_flag flag)
414 {
415 unsigned long value;
416 int ret;
417
418 ret = kstrtoul(buf, 10, &value);
419 if (ret < 0)
420 return ret;
421 if (value > 1)
422 return -EINVAL;
423
424 if (value)
425 set_bit(flag, &transparent_hugepage_flags);
426 else
427 clear_bit(flag, &transparent_hugepage_flags);
428
429 return count;
430 }
431
432 static ssize_t defrag_show(struct kobject *kobj,
433 struct kobj_attribute *attr, char *buf)
434 {
435 const char *output;
436
437 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
438 &transparent_hugepage_flags))
439 output = "[always] defer defer+madvise madvise never";
440 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
441 &transparent_hugepage_flags))
442 output = "always [defer] defer+madvise madvise never";
443 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
444 &transparent_hugepage_flags))
445 output = "always defer [defer+madvise] madvise never";
446 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
447 &transparent_hugepage_flags))
448 output = "always defer defer+madvise [madvise] never";
449 else
450 output = "always defer defer+madvise madvise [never]";
451
452 return sysfs_emit(buf, "%s\n", output);
453 }
454
455 static ssize_t defrag_store(struct kobject *kobj,
456 struct kobj_attribute *attr,
457 const char *buf, size_t count)
458 {
459 if (sysfs_streq(buf, "always")) {
460 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
461 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
462 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
463 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
464 } else if (sysfs_streq(buf, "defer+madvise")) {
465 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
466 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
467 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
468 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
469 } else if (sysfs_streq(buf, "defer")) {
470 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
471 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
472 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
473 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
474 } else if (sysfs_streq(buf, "madvise")) {
475 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
476 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
477 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
478 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
479 } else if (sysfs_streq(buf, "never")) {
480 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
481 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
482 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
483 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
484 } else
485 return -EINVAL;
486
487 return count;
488 }
489 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
490
491 static ssize_t use_zero_page_show(struct kobject *kobj,
492 struct kobj_attribute *attr, char *buf)
493 {
494 return single_hugepage_flag_show(kobj, attr, buf,
495 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
496 }
497 static ssize_t use_zero_page_store(struct kobject *kobj,
498 struct kobj_attribute *attr, const char *buf, size_t count)
499 {
500 return single_hugepage_flag_store(kobj, attr, buf, count,
501 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
502 }
503 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
504
505 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
506 struct kobj_attribute *attr, char *buf)
507 {
508 return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
509 }
510 static struct kobj_attribute hpage_pmd_size_attr =
511 __ATTR_RO(hpage_pmd_size);
512
513 static ssize_t split_underused_thp_show(struct kobject *kobj,
514 struct kobj_attribute *attr, char *buf)
515 {
516 return sysfs_emit(buf, "%d\n", split_underused_thp);
517 }
518
519 static ssize_t split_underused_thp_store(struct kobject *kobj,
520 struct kobj_attribute *attr,
521 const char *buf, size_t count)
522 {
523 int err = kstrtobool(buf, &split_underused_thp);
524
525 if (err < 0)
526 return err;
527
528 return count;
529 }
530
531 static struct kobj_attribute split_underused_thp_attr = __ATTR(
532 shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
533
534 static struct attribute *hugepage_attr[] = {
535 &enabled_attr.attr,
536 &defrag_attr.attr,
537 &use_zero_page_attr.attr,
538 &hpage_pmd_size_attr.attr,
539 #ifdef CONFIG_SHMEM
540 &shmem_enabled_attr.attr,
541 #endif
542 &split_underused_thp_attr.attr,
543 NULL,
544 };
545
546 static const struct attribute_group hugepage_attr_group = {
547 .attrs = hugepage_attr,
548 };
549
550 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
551 static void thpsize_release(struct kobject *kobj);
552 static DEFINE_SPINLOCK(huge_anon_orders_lock);
553 static LIST_HEAD(thpsize_list);
554
555 static ssize_t anon_enabled_show(struct kobject *kobj,
556 struct kobj_attribute *attr, char *buf)
557 {
558 int order = to_thpsize(kobj)->order;
559 const char *output;
560
561 if (test_bit(order, &huge_anon_orders_always))
562 output = "[always] inherit madvise never";
563 else if (test_bit(order, &huge_anon_orders_inherit))
564 output = "always [inherit] madvise never";
565 else if (test_bit(order, &huge_anon_orders_madvise))
566 output = "always inherit [madvise] never";
567 else
568 output = "always inherit madvise [never]";
569
570 return sysfs_emit(buf, "%s\n", output);
571 }
572
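/*
 * Per-order variant of set_global_enabled_mode(): under
 * huge_anon_orders_lock, set bit @order in the bitmap matching @mode and
 * clear it in the other bitmaps ("never" clears it everywhere). Returns
 * true if any bitmap changed.
 */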
573 static bool set_anon_enabled_mode(int order, enum anon_enabled_mode mode)
574 {
575 static unsigned long *enabled_orders[] = {
576 &huge_anon_orders_always,
577 &huge_anon_orders_inherit,
578 &huge_anon_orders_madvise,
579 };
580 enum anon_enabled_mode m;
581 bool changed = false;
582
583 spin_lock(&huge_anon_orders_lock);
584 for (m = 0; m < ARRAY_SIZE(enabled_orders); m++) {
585 if (m == mode)
586 changed |= !__test_and_set_bit(order, enabled_orders[m]);
587 else
588 changed |= __test_and_clear_bit(order, enabled_orders[m]);
589 }
590 spin_unlock(&huge_anon_orders_lock);
591
592 return changed;
593 }
594
595 static ssize_t anon_enabled_store(struct kobject *kobj,
596 struct kobj_attribute *attr,
597 const char *buf, size_t count)
598 {
599 int order = to_thpsize(kobj)->order;
600 int mode;
601
602 mode = sysfs_match_string(anon_enabled_mode_strings, buf);
603 if (mode < 0)
604 return -EINVAL;
605
606 if (set_anon_enabled_mode(order, mode)) {
607 int err = start_stop_khugepaged();
608
609 if (err)
610 return err;
611 } else {
612 /*
613 * Recalculate watermarks even when the mode didn't
614 * change, as the previous code always called
615 * start_stop_khugepaged() which does this internally.
616 */
617 set_recommended_min_free_kbytes();
618 }
619
620 return count;
621 }
622
623 static struct kobj_attribute anon_enabled_attr =
624 __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
625
626 static struct attribute *anon_ctrl_attrs[] = {
627 &anon_enabled_attr.attr,
628 NULL,
629 };
630
631 static const struct attribute_group anon_ctrl_attr_grp = {
632 .attrs = anon_ctrl_attrs,
633 };
634
635 static struct attribute *file_ctrl_attrs[] = {
636 #ifdef CONFIG_SHMEM
637 &thpsize_shmem_enabled_attr.attr,
638 #endif
639 NULL,
640 };
641
642 static const struct attribute_group file_ctrl_attr_grp = {
643 .attrs = file_ctrl_attrs,
644 };
645
646 static struct attribute *any_ctrl_attrs[] = {
647 NULL,
648 };
649
650 static const struct attribute_group any_ctrl_attr_grp = {
651 .attrs = any_ctrl_attrs,
652 };
653
654 static const struct kobj_type thpsize_ktype = {
655 .release = &thpsize_release,
656 .sysfs_ops = &kobj_sysfs_ops,
657 };
658
659 DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
660
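/*
 * mthp_stats are plain per-CPU counters; summing them across CPUs without
 * synchronisation yields an approximate snapshot, which is sufficient for
 * the sysfs stats files below.
 */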
661 static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
662 {
663 unsigned long sum = 0;
664 int cpu;
665
666 for_each_possible_cpu(cpu) {
667 struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
668
669 sum += this->stats[order][item];
670 }
671
672 return sum;
673 }
674
675 #define DEFINE_MTHP_STAT_ATTR(_name, _index) \
676 static ssize_t _name##_show(struct kobject *kobj, \
677 struct kobj_attribute *attr, char *buf) \
678 { \
679 int order = to_thpsize(kobj)->order; \
680 \
681 return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \
682 } \
683 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
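/*
 * For illustration: DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT) below
 * generates a swpout_show() callback that sums the per-CPU counter for the
 * kobject's order, plus a read-only "swpout" sysfs attribute exposing it.
 */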
684
685 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
686 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
687 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
688 DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
689 DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
690 DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
691 DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
692 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
693 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
694 #ifdef CONFIG_SHMEM
695 DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
696 DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
697 DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
698 #endif
699 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
700 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
701 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
702 DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
703 DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
704
705 static struct attribute *anon_stats_attrs[] = {
706 &anon_fault_alloc_attr.attr,
707 &anon_fault_fallback_attr.attr,
708 &anon_fault_fallback_charge_attr.attr,
709 #ifndef CONFIG_SHMEM
710 &zswpout_attr.attr,
711 &swpin_attr.attr,
712 &swpin_fallback_attr.attr,
713 &swpin_fallback_charge_attr.attr,
714 &swpout_attr.attr,
715 &swpout_fallback_attr.attr,
716 #endif
717 &split_deferred_attr.attr,
718 &nr_anon_attr.attr,
719 &nr_anon_partially_mapped_attr.attr,
720 NULL,
721 };
722
723 static struct attribute_group anon_stats_attr_grp = {
724 .name = "stats",
725 .attrs = anon_stats_attrs,
726 };
727
728 static struct attribute *file_stats_attrs[] = {
729 #ifdef CONFIG_SHMEM
730 &shmem_alloc_attr.attr,
731 &shmem_fallback_attr.attr,
732 &shmem_fallback_charge_attr.attr,
733 #endif
734 NULL,
735 };
736
737 static struct attribute_group file_stats_attr_grp = {
738 .name = "stats",
739 .attrs = file_stats_attrs,
740 };
741
742 static struct attribute *any_stats_attrs[] = {
743 #ifdef CONFIG_SHMEM
744 &zswpout_attr.attr,
745 &swpin_attr.attr,
746 &swpin_fallback_attr.attr,
747 &swpin_fallback_charge_attr.attr,
748 &swpout_attr.attr,
749 &swpout_fallback_attr.attr,
750 #endif
751 &split_attr.attr,
752 &split_failed_attr.attr,
753 NULL,
754 };
755
756 static struct attribute_group any_stats_attr_grp = {
757 .name = "stats",
758 .attrs = any_stats_attrs,
759 };
760
761 static int sysfs_add_group(struct kobject *kobj,
762 const struct attribute_group *grp)
763 {
764 int ret = -ENOENT;
765
766 /*
767 * If the group is named, try to merge first, assuming the subdirectory
768 * was already created. This avoids the warning emitted by
769 * sysfs_create_group() if the directory already exists.
770 */
771 if (grp->name)
772 ret = sysfs_merge_group(kobj, grp);
773 if (ret)
774 ret = sysfs_create_group(kobj, grp);
775
776 return ret;
777 }
778
779 static struct thpsize *thpsize_create(int order, struct kobject *parent)
780 {
781 unsigned long size = (PAGE_SIZE << order) / SZ_1K;
782 struct thpsize *thpsize;
783 int ret = -ENOMEM;
784
785 thpsize = kzalloc_obj(*thpsize);
786 if (!thpsize)
787 goto err;
788
789 thpsize->order = order;
790
791 ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
792 "hugepages-%lukB", size);
793 if (ret) {
794 kfree(thpsize);
795 goto err;
796 }
797
798
799 ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
800 if (ret)
801 goto err_put;
802
803 ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
804 if (ret)
805 goto err_put;
806
807 if (BIT(order) & THP_ORDERS_ALL_ANON) {
808 ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
809 if (ret)
810 goto err_put;
811
812 ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
813 if (ret)
814 goto err_put;
815 }
816
817 if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
818 ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
819 if (ret)
820 goto err_put;
821
822 ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
823 if (ret)
824 goto err_put;
825 }
826
827 return thpsize;
828 err_put:
829 kobject_put(&thpsize->kobj);
830 err:
831 return ERR_PTR(ret);
832 }
833
834 static void thpsize_release(struct kobject *kobj)
835 {
836 kfree(to_thpsize(kobj));
837 }
838
839 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
840 {
841 int err;
842 struct thpsize *thpsize;
843 unsigned long orders;
844 int order;
845
846 /*
847 * Default to setting PMD-sized THP to inherit the global setting and
848 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
849 * constant so we have to do this here.
850 */
851 if (!anon_orders_configured)
852 huge_anon_orders_inherit = BIT(PMD_ORDER);
853
854 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
855 if (unlikely(!*hugepage_kobj)) {
856 pr_err("failed to create transparent hugepage kobject\n");
857 return -ENOMEM;
858 }
859
860 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
861 if (err) {
862 pr_err("failed to register transparent hugepage group\n");
863 goto delete_obj;
864 }
865
866 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
867 if (err) {
868 pr_err("failed to register transparent hugepage group\n");
869 goto remove_hp_group;
870 }
871
872 orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
873 order = highest_order(orders);
874 while (orders) {
875 thpsize = thpsize_create(order, *hugepage_kobj);
876 if (IS_ERR(thpsize)) {
877 pr_err("failed to create thpsize for order %d\n", order);
878 err = PTR_ERR(thpsize);
879 goto remove_all;
880 }
881 list_add(&thpsize->node, &thpsize_list);
882 order = next_order(&orders, order);
883 }
884
885 return 0;
886
887 remove_all:
888 hugepage_exit_sysfs(*hugepage_kobj);
889 return err;
890 remove_hp_group:
891 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
892 delete_obj:
893 kobject_put(*hugepage_kobj);
894 return err;
895 }
896
897 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
898 {
899 struct thpsize *thpsize, *tmp;
900
901 list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
902 list_del(&thpsize->node);
903 kobject_put(&thpsize->kobj);
904 }
905
906 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
907 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
908 kobject_put(hugepage_kobj);
909 }
910 #else
911 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
912 {
913 return 0;
914 }
915
916 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
917 {
918 }
919 #endif /* CONFIG_SYSFS */
920
921 static int __init thp_shrinker_init(void)
922 {
923 deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
924 SHRINKER_MEMCG_AWARE |
925 SHRINKER_NONSLAB,
926 "thp-deferred_split");
927 if (!deferred_split_shrinker)
928 return -ENOMEM;
929
930 deferred_split_shrinker->count_objects = deferred_split_count;
931 deferred_split_shrinker->scan_objects = deferred_split_scan;
932 shrinker_register(deferred_split_shrinker);
933
934 if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO)) {
935 /*
936 * Bump the reference of the huge_zero_folio and do not
937 * initialize the shrinker.
938 *
939 * huge_zero_folio will always be NULL on failure. We assume
940 * that get_huge_zero_folio() will most likely not fail as
941 * thp_shrinker_init() is invoked early on during boot.
942 */
943 if (!get_huge_zero_folio())
944 pr_warn("Allocating persistent huge zero folio failed\n");
945 return 0;
946 }
947
948 huge_zero_folio_shrinker = shrinker_alloc(0, "thp-zero");
949 if (!huge_zero_folio_shrinker) {
950 shrinker_free(deferred_split_shrinker);
951 return -ENOMEM;
952 }
953
954 huge_zero_folio_shrinker->count_objects = shrink_huge_zero_folio_count;
955 huge_zero_folio_shrinker->scan_objects = shrink_huge_zero_folio_scan;
956 shrinker_register(huge_zero_folio_shrinker);
957
958 return 0;
959 }
960
961 static void __init thp_shrinker_exit(void)
962 {
963 shrinker_free(huge_zero_folio_shrinker);
964 shrinker_free(deferred_split_shrinker);
965 }
966
967 static int __init hugepage_init(void)
968 {
969 int err;
970 struct kobject *hugepage_kobj;
971
972 if (!has_transparent_hugepage()) {
973 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
974 return -EINVAL;
975 }
976
977 /*
978 * hugepages can't be allocated by the buddy allocator
979 */
980 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
981
982 err = hugepage_init_sysfs(&hugepage_kobj);
983 if (err)
984 goto err_sysfs;
985
986 err = khugepaged_init();
987 if (err)
988 goto err_slab;
989
990 err = thp_shrinker_init();
991 if (err)
992 goto err_shrinker;
993
994 /*
995 * By default disable transparent hugepages on smaller systems,
996 * where the extra memory used could hurt more than TLB overhead
997 * is likely to save. The admin can still enable it through /sys.
998 */
999 if (totalram_pages() < MB_TO_PAGES(512)) {
1000 transparent_hugepage_flags = 0;
1001 return 0;
1002 }
1003
1004 err = start_stop_khugepaged();
1005 if (err)
1006 goto err_khugepaged;
1007
1008 return 0;
1009 err_khugepaged:
1010 thp_shrinker_exit();
1011 err_shrinker:
1012 khugepaged_destroy();
1013 err_slab:
1014 hugepage_exit_sysfs(hugepage_kobj);
1015 err_sysfs:
1016 return err;
1017 }
1018 subsys_initcall(hugepage_init);
1019
1020 static int __init setup_transparent_hugepage(char *str)
1021 {
1022 int ret = 0;
1023 if (!str)
1024 goto out;
1025 if (!strcmp(str, "always")) {
1026 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
1027 &transparent_hugepage_flags);
1028 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1029 &transparent_hugepage_flags);
1030 ret = 1;
1031 } else if (!strcmp(str, "madvise")) {
1032 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
1033 &transparent_hugepage_flags);
1034 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1035 &transparent_hugepage_flags);
1036 ret = 1;
1037 } else if (!strcmp(str, "never")) {
1038 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
1039 &transparent_hugepage_flags);
1040 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1041 &transparent_hugepage_flags);
1042 ret = 1;
1043 }
1044 out:
1045 if (!ret)
1046 pr_warn("transparent_hugepage= cannot parse, ignored\n");
1047 return ret;
1048 }
1049 __setup("transparent_hugepage=", setup_transparent_hugepage);
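/*
 * Example: booting with "transparent_hugepage=madvise" sets the REQ_MADV
 * flag and clears the global "always" flag, matching the sysfs "madvise"
 * setting.
 */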
1050
1051 static char str_dup[PAGE_SIZE] __initdata;
1052 static int __init setup_thp_anon(char *str)
1053 {
1054 char *token, *range, *policy, *subtoken;
1055 unsigned long always, inherit, madvise;
1056 char *start_size, *end_size;
1057 int start, end, nr;
1058 char *p;
1059
1060 if (!str || strlen(str) + 1 > PAGE_SIZE)
1061 goto err;
1062 strscpy(str_dup, str);
1063
1064 always = huge_anon_orders_always;
1065 madvise = huge_anon_orders_madvise;
1066 inherit = huge_anon_orders_inherit;
1067 p = str_dup;
1068 while ((token = strsep(&p, ";")) != NULL) {
1069 range = strsep(&token, ":");
1070 policy = token;
1071
1072 if (!policy)
1073 goto err;
1074
1075 while ((subtoken = strsep(&range, ",")) != NULL) {
1076 if (strchr(subtoken, '-')) {
1077 start_size = strsep(&subtoken, "-");
1078 end_size = subtoken;
1079
1080 start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON);
1081 end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON);
1082 } else {
1083 start_size = end_size = subtoken;
1084 start = end = get_order_from_str(subtoken,
1085 THP_ORDERS_ALL_ANON);
1086 }
1087
1088 if (start == -EINVAL) {
1089 pr_err("invalid size %s in thp_anon boot parameter\n", start_size);
1090 goto err;
1091 }
1092
1093 if (end == -EINVAL) {
1094 pr_err("invalid size %s in thp_anon boot parameter\n", end_size);
1095 goto err;
1096 }
1097
1098 if (start < 0 || end < 0 || start > end)
1099 goto err;
1100
1101 nr = end - start + 1;
1102 if (!strcmp(policy, "always")) {
1103 bitmap_set(&always, start, nr);
1104 bitmap_clear(&inherit, start, nr);
1105 bitmap_clear(&madvise, start, nr);
1106 } else if (!strcmp(policy, "madvise")) {
1107 bitmap_set(&madvise, start, nr);
1108 bitmap_clear(&inherit, start, nr);
1109 bitmap_clear(&always, start, nr);
1110 } else if (!strcmp(policy, "inherit")) {
1111 bitmap_set(&inherit, start, nr);
1112 bitmap_clear(&madvise, start, nr);
1113 bitmap_clear(&always, start, nr);
1114 } else if (!strcmp(policy, "never")) {
1115 bitmap_clear(&inherit, start, nr);
1116 bitmap_clear(&madvise, start, nr);
1117 bitmap_clear(&always, start, nr);
1118 } else {
1119 pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
1120 goto err;
1121 }
1122 }
1123 }
1124
1125 huge_anon_orders_always = always;
1126 huge_anon_orders_madvise = madvise;
1127 huge_anon_orders_inherit = inherit;
1128 anon_orders_configured = true;
1129 return 1;
1130
1131 err:
1132 pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
1133 return 0;
1134 }
1135 __setup("thp_anon=", setup_thp_anon);
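/*
 * The parser above accepts semicolon-separated "<size list>:<policy>"
 * entries, where the size list is comma-separated and each entry may be a
 * single size or a range; an illustrative (not authoritative) example:
 * "thp_anon=16K-64K:always;128K:never".
 */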
1136
1137 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
1138 {
1139 if (likely(vma->vm_flags & VM_WRITE))
1140 pmd = pmd_mkwrite(pmd, vma);
1141 return pmd;
1142 }
1143
1144 static struct deferred_split *split_queue_node(int nid)
1145 {
1146 struct pglist_data *pgdata = NODE_DATA(nid);
1147
1148 return &pgdata->deferred_split_queue;
1149 }
1150
1151 #ifdef CONFIG_MEMCG
1152 static inline
1153 struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
1154 struct deferred_split *queue)
1155 {
1156 if (mem_cgroup_disabled())
1157 return NULL;
1158 if (split_queue_node(folio_nid(folio)) == queue)
1159 return NULL;
1160 return container_of(queue, struct mem_cgroup, deferred_split_queue);
1161 }
1162
1163 static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
1164 {
1165 return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
1166 }
1167 #else
1168 static inline
1169 struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
1170 struct deferred_split *queue)
1171 {
1172 return NULL;
1173 }
1174
1175 static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
1176 {
1177 return split_queue_node(nid);
1178 }
1179 #endif
1180
1181 static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
1182 {
1183 struct deferred_split *queue;
1184
1185 retry:
1186 queue = memcg_split_queue(nid, memcg);
1187 spin_lock(&queue->split_queue_lock);
1188 /*
1189 * There is a period between setting memcg to dying and reparenting
1190 * deferred split queue, and during this period the THPs in the deferred
1191 * split queue will be hidden from the shrinker side.
1192 */
1193 if (unlikely(memcg_is_dying(memcg))) {
1194 spin_unlock(&queue->split_queue_lock);
1195 memcg = parent_mem_cgroup(memcg);
1196 goto retry;
1197 }
1198
1199 return queue;
1200 }
1201
1202 static struct deferred_split *
1203 split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
1204 {
1205 struct deferred_split *queue;
1206
1207 retry:
1208 queue = memcg_split_queue(nid, memcg);
1209 spin_lock_irqsave(&queue->split_queue_lock, *flags);
1210 if (unlikely(memcg_is_dying(memcg))) {
1211 spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
1212 memcg = parent_mem_cgroup(memcg);
1213 goto retry;
1214 }
1215
1216 return queue;
1217 }
1218
1219 static struct deferred_split *folio_split_queue_lock(struct folio *folio)
1220 {
1221 struct deferred_split *queue;
1222
1223 rcu_read_lock();
1224 queue = split_queue_lock(folio_nid(folio), folio_memcg(folio));
1225 /*
1226 * The memcg destruction path acquires the split queue lock when
1227 * reparenting, so once the lock is held it is safe to drop the RCU lock.
1228 */
1229 rcu_read_unlock();
1230
1231 return queue;
1232 }
1233
1234 static struct deferred_split *
1235 folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
1236 {
1237 struct deferred_split *queue;
1238
1239 rcu_read_lock();
1240 queue = split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
1241 rcu_read_unlock();
1242
1243 return queue;
1244 }
1245
1246 static inline void split_queue_unlock(struct deferred_split *queue)
1247 {
1248 spin_unlock(&queue->split_queue_lock);
1249 }
1250
1251 static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
1252 unsigned long flags)
1253 {
1254 spin_unlock_irqrestore(&queue->split_queue_lock, flags);
1255 }
1256
1257 static inline bool is_transparent_hugepage(const struct folio *folio)
1258 {
1259 if (!folio_test_large(folio))
1260 return false;
1261
1262 return is_huge_zero_folio(folio) ||
1263 folio_test_large_rmappable(folio);
1264 }
1265
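/*
 * Helper for thp_get_unmapped_area_vmflags(): request @len + @size bytes
 * so the result can be shifted until the mapping address is congruent to
 * @off modulo @size, letting PMD-aligned file offsets land on PMD-aligned
 * addresses. Returns 0 when such alignment is impossible or not worth
 * attempting, in which case the caller falls back to an unaligned search.
 */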
1266 static unsigned long __thp_get_unmapped_area(struct file *filp,
1267 unsigned long addr, unsigned long len,
1268 loff_t off, unsigned long flags, unsigned long size,
1269 vm_flags_t vm_flags)
1270 {
1271 loff_t off_end = off + len;
1272 loff_t off_align = round_up(off, size);
1273 unsigned long len_pad, ret, off_sub;
1274
1275 if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
1276 return 0;
1277
1278 if (off_end <= off_align || (off_end - off_align) < size)
1279 return 0;
1280
1281 len_pad = len + size;
1282 if (len_pad < len || (off + len_pad) < off)
1283 return 0;
1284
1285 ret = mm_get_unmapped_area_vmflags(filp, addr, len_pad,
1286 off >> PAGE_SHIFT, flags, vm_flags);
1287
1288 /*
1289 * The failure might be due to length padding. The caller will retry
1290 * without the padding.
1291 */
1292 if (IS_ERR_VALUE(ret))
1293 return 0;
1294
1295 /*
1296 * Do not try to align to THP boundary if allocation at the address
1297 * hint succeeds.
1298 */
1299 if (ret == addr)
1300 return addr;
1301
1302 off_sub = (off - ret) & (size - 1);
1303
1304 if (mm_flags_test(MMF_TOPDOWN, current->mm) && !off_sub)
1305 return ret + size;
1306
1307 ret += off_sub;
1308 return ret;
1309 }
1310
1311 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
1312 unsigned long len, unsigned long pgoff, unsigned long flags,
1313 vm_flags_t vm_flags)
1314 {
1315 unsigned long ret;
1316 loff_t off = (loff_t)pgoff << PAGE_SHIFT;
1317
1318 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
1319 if (ret)
1320 return ret;
1321
1322 return mm_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags,
1323 vm_flags);
1324 }
1325
1326 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
1327 unsigned long len, unsigned long pgoff, unsigned long flags)
1328 {
1329 return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
1330 }
1331 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
1332
1333 static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
1334 unsigned long addr)
1335 {
1336 gfp_t gfp = vma_thp_gfp_mask(vma);
1337 const int order = HPAGE_PMD_ORDER;
1338 struct folio *folio;
1339
1340 folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);
1341
1342 if (unlikely(!folio)) {
1343 count_vm_event(THP_FAULT_FALLBACK);
1344 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1345 return NULL;
1346 }
1347
1348 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1349 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
1350 folio_put(folio);
1351 count_vm_event(THP_FAULT_FALLBACK);
1352 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
1353 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1354 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
1355 return NULL;
1356 }
1357 folio_throttle_swaprate(folio, gfp);
1358
1359 /*
1360 * When a folio is not zeroed during allocation (__GFP_ZERO not used)
1361 * or user folios require special handling, folio_zero_user() is used to
1362 * make sure that the page corresponding to the faulting address will be
1363 * hot in the cache after zeroing.
1364 */
1365 if (user_alloc_needs_zeroing())
1366 folio_zero_user(folio, addr);
1367 /*
1368 * The memory barrier inside __folio_mark_uptodate makes sure that
1369 * folio_zero_user writes become visible before the set_pmd_at()
1370 * write.
1371 */
1372 __folio_mark_uptodate(folio);
1373 return folio;
1374 }
1375
1376 void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
1377 struct vm_area_struct *vma, unsigned long haddr)
1378 {
1379 pmd_t entry;
1380
1381 entry = folio_mk_pmd(folio, vma->vm_page_prot);
1382 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1383 folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
1384 folio_add_lru_vma(folio, vma);
1385 set_pmd_at(vma->vm_mm, haddr, pmd, entry);
1386 update_mmu_cache_pmd(vma, haddr, pmd);
1387 deferred_split_folio(folio, false);
1388 }
1389
1390 static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
1391 struct vm_area_struct *vma, unsigned long haddr)
1392 {
1393 map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
1394 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1395 count_vm_event(THP_FAULT_ALLOC);
1396 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
1397 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
1398 }
1399
1400 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1401 {
1402 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1403 struct vm_area_struct *vma = vmf->vma;
1404 struct folio *folio;
1405 pgtable_t pgtable;
1406 vm_fault_t ret = 0;
1407
1408 folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1409 if (unlikely(!folio))
1410 return VM_FAULT_FALLBACK;
1411
1412 pgtable = pte_alloc_one(vma->vm_mm);
1413 if (unlikely(!pgtable)) {
1414 ret = VM_FAULT_OOM;
1415 goto release;
1416 }
1417
1418 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1419 if (unlikely(!pmd_none(*vmf->pmd))) {
1420 goto unlock_release;
1421 } else {
1422 ret = check_stable_address_space(vma->vm_mm);
1423 if (ret)
1424 goto unlock_release;
1425
1426 /* Deliver the page fault to userland */
1427 if (userfaultfd_missing(vma)) {
1428 spin_unlock(vmf->ptl);
1429 folio_put(folio);
1430 pte_free(vma->vm_mm, pgtable);
1431 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1432 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1433 return ret;
1434 }
1435 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1436 map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
1437 mm_inc_nr_ptes(vma->vm_mm);
1438 spin_unlock(vmf->ptl);
1439 }
1440
1441 return 0;
1442 unlock_release:
1443 spin_unlock(vmf->ptl);
1444 release:
1445 if (pgtable)
1446 pte_free(vma->vm_mm, pgtable);
1447 folio_put(folio);
1448 return ret;
1449
1450 }
1451
1452 vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
1453 {
1454 struct vm_area_struct *vma = vmf->vma;
1455 vm_fault_t ret = 0;
1456 spinlock_t *ptl;
1457 softleaf_t entry;
1458 struct page *page;
1459 struct folio *folio;
1460
1461 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
1462 vma_end_read(vma);
1463 return VM_FAULT_RETRY;
1464 }
1465
1466 ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1467 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) {
1468 spin_unlock(ptl);
1469 return 0;
1470 }
1471
1472 entry = softleaf_from_pmd(vmf->orig_pmd);
1473 page = softleaf_to_page(entry);
1474 folio = page_folio(page);
1475 vmf->page = page;
1476 vmf->pte = NULL;
1477 if (folio_trylock(folio)) {
1478 folio_get(folio);
1479 spin_unlock(ptl);
1480 ret = page_pgmap(page)->ops->migrate_to_ram(vmf);
1481 folio_unlock(folio);
1482 folio_put(folio);
1483 } else {
1484 spin_unlock(ptl);
1485 }
1486
1487 return ret;
1488 }
1489
1490 /*
1491 * always: directly stall for all thp allocations
1492 * defer: wake kswapd and fail if not immediately available
1493 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
1494 * fail if not immediately available
1495 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
1496 * available
1497 * never: never stall for any thp allocation
1498 */
1499 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
1500 {
1501 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
1502
1503 /* Always do synchronous compaction */
1504 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
1505 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
1506
1507 /* Kick kcompactd and fail quickly */
1508 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
1509 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
1510
1511 /* Synchronous compaction if madvised, otherwise kick kcompactd */
1512 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
1513 return GFP_TRANSHUGE_LIGHT |
1514 (vma_madvised ? __GFP_DIRECT_RECLAIM :
1515 __GFP_KSWAPD_RECLAIM);
1516
1517 /* Only do synchronous compaction if madvised */
1518 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
1519 return GFP_TRANSHUGE_LIGHT |
1520 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
1521
1522 return GFP_TRANSHUGE_LIGHT;
1523 }
1524
1525 /* Caller must hold page table lock. */
1526 static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
1527 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
1528 struct folio *zero_folio)
1529 {
1530 pmd_t entry;
1531 entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);
1532 entry = pmd_mkspecial(entry);
1533 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1534 set_pmd_at(mm, haddr, pmd, entry);
1535 mm_inc_nr_ptes(mm);
1536 }
1537
1538 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1539 {
1540 struct vm_area_struct *vma = vmf->vma;
1541 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1542 vm_fault_t ret;
1543
1544 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
1545 return VM_FAULT_FALLBACK;
1546 ret = vmf_anon_prepare(vmf);
1547 if (ret)
1548 return ret;
1549 khugepaged_enter_vma(vma, vma->vm_flags);
1550
1551 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1552 !mm_forbids_zeropage(vma->vm_mm) &&
1553 transparent_hugepage_use_zero_page()) {
1554 pgtable_t pgtable;
1555 struct folio *zero_folio;
1556 vm_fault_t ret;
1557
1558 pgtable = pte_alloc_one(vma->vm_mm);
1559 if (unlikely(!pgtable))
1560 return VM_FAULT_OOM;
1561 zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
1562 if (unlikely(!zero_folio)) {
1563 pte_free(vma->vm_mm, pgtable);
1564 count_vm_event(THP_FAULT_FALLBACK);
1565 return VM_FAULT_FALLBACK;
1566 }
1567 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1568 ret = 0;
1569 if (pmd_none(*vmf->pmd)) {
1570 ret = check_stable_address_space(vma->vm_mm);
1571 if (ret) {
1572 spin_unlock(vmf->ptl);
1573 pte_free(vma->vm_mm, pgtable);
1574 } else if (userfaultfd_missing(vma)) {
1575 spin_unlock(vmf->ptl);
1576 pte_free(vma->vm_mm, pgtable);
1577 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1578 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1579 } else {
1580 set_huge_zero_folio(pgtable, vma->vm_mm, vma,
1581 haddr, vmf->pmd, zero_folio);
1582 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1583 spin_unlock(vmf->ptl);
1584 }
1585 } else {
1586 spin_unlock(vmf->ptl);
1587 pte_free(vma->vm_mm, pgtable);
1588 }
1589 return ret;
1590 }
1591
1592 return __do_huge_pmd_anonymous_page(vmf);
1593 }
1594
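/*
 * Small helper passed to insert_pmd()/insert_pud(): holds either a folio
 * (is_folio == true) or a raw pfn, so both kinds of caller can share one
 * insertion path.
 */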
1595 struct folio_or_pfn {
1596 union {
1597 struct folio *folio;
1598 unsigned long pfn;
1599 };
1600 bool is_folio;
1601 };
1602
1603 static vm_fault_t insert_pmd(struct vm_area_struct *vma, unsigned long addr,
1604 pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
1605 bool write)
1606 {
1607 struct mm_struct *mm = vma->vm_mm;
1608 pgtable_t pgtable = NULL;
1609 spinlock_t *ptl;
1610 pmd_t entry;
1611
1612 if (addr < vma->vm_start || addr >= vma->vm_end)
1613 return VM_FAULT_SIGBUS;
1614
1615 if (arch_needs_pgtable_deposit()) {
1616 pgtable = pte_alloc_one(vma->vm_mm);
1617 if (!pgtable)
1618 return VM_FAULT_OOM;
1619 }
1620
1621 ptl = pmd_lock(mm, pmd);
1622 if (!pmd_none(*pmd)) {
1623 const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
1624 fop.pfn;
1625
1626 if (write) {
1627 if (pmd_pfn(*pmd) != pfn) {
1628 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1629 goto out_unlock;
1630 }
1631 entry = pmd_mkyoung(*pmd);
1632 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1633 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1634 update_mmu_cache_pmd(vma, addr, pmd);
1635 }
1636 goto out_unlock;
1637 }
1638
1639 if (fop.is_folio) {
1640 entry = folio_mk_pmd(fop.folio, vma->vm_page_prot);
1641
1642 if (is_huge_zero_folio(fop.folio)) {
1643 entry = pmd_mkspecial(entry);
1644 } else {
1645 folio_get(fop.folio);
1646 folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
1647 add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
1648 }
1649 } else {
1650 entry = pmd_mkhuge(pfn_pmd(fop.pfn, prot));
1651 entry = pmd_mkspecial(entry);
1652 }
1653 if (write) {
1654 entry = pmd_mkyoung(pmd_mkdirty(entry));
1655 entry = maybe_pmd_mkwrite(entry, vma);
1656 }
1657
1658 if (pgtable) {
1659 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1660 mm_inc_nr_ptes(mm);
1661 pgtable = NULL;
1662 }
1663
1664 set_pmd_at(mm, addr, pmd, entry);
1665 update_mmu_cache_pmd(vma, addr, pmd);
1666
1667 out_unlock:
1668 spin_unlock(ptl);
1669 if (pgtable)
1670 pte_free(mm, pgtable);
1671 return VM_FAULT_NOPAGE;
1672 }
1673
1674 /**
1675 * vmf_insert_pfn_pmd - insert a pmd size pfn
1676 * @vmf: Structure describing the fault
1677 * @pfn: pfn to insert
1678 * @write: whether it's a write fault
1679 *
1680 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1681 *
1682 * Return: vm_fault_t value.
1683 */
1684 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
1685 bool write)
1686 {
1687 unsigned long addr = vmf->address & PMD_MASK;
1688 struct vm_area_struct *vma = vmf->vma;
1689 pgprot_t pgprot = vma->vm_page_prot;
1690 struct folio_or_pfn fop = {
1691 .pfn = pfn,
1692 };
1693
1694 /*
1695 * If we had pmd_special, we could avoid all these restrictions,
1696 * but we need to be consistent with PTEs and architectures that
1697 * can't support a 'special' bit.
1698 */
1699 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1700 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1701 (VM_PFNMAP|VM_MIXEDMAP));
1702 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1703
1704 pfnmap_setup_cachemode_pfn(pfn, &pgprot);
1705
1706 return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write);
1707 }
1708 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
1709
1710 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
1711 bool write)
1712 {
1713 struct vm_area_struct *vma = vmf->vma;
1714 unsigned long addr = vmf->address & PMD_MASK;
1715 struct folio_or_pfn fop = {
1716 .folio = folio,
1717 .is_folio = true,
1718 };
1719
1720 if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
1721 return VM_FAULT_SIGBUS;
1722
1723 return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write);
1724 }
1725 EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
1726
1727 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1728 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
1729 {
1730 if (likely(vma->vm_flags & VM_WRITE))
1731 pud = pud_mkwrite(pud);
1732 return pud;
1733 }
1734
1735 static vm_fault_t insert_pud(struct vm_area_struct *vma, unsigned long addr,
1736 pud_t *pud, struct folio_or_pfn fop, pgprot_t prot, bool write)
1737 {
1738 struct mm_struct *mm = vma->vm_mm;
1739 spinlock_t *ptl;
1740 pud_t entry;
1741
1742 if (addr < vma->vm_start || addr >= vma->vm_end)
1743 return VM_FAULT_SIGBUS;
1744
1745 ptl = pud_lock(mm, pud);
1746 if (!pud_none(*pud)) {
1747 const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
1748 fop.pfn;
1749
1750 if (write) {
1751 if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
1752 goto out_unlock;
1753 entry = pud_mkyoung(*pud);
1754 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1755 if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1756 update_mmu_cache_pud(vma, addr, pud);
1757 }
1758 goto out_unlock;
1759 }
1760
1761 if (fop.is_folio) {
1762 entry = folio_mk_pud(fop.folio, vma->vm_page_prot);
1763
1764 folio_get(fop.folio);
1765 folio_add_file_rmap_pud(fop.folio, &fop.folio->page, vma);
1766 add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR);
1767 } else {
1768 entry = pud_mkhuge(pfn_pud(fop.pfn, prot));
1769 entry = pud_mkspecial(entry);
1770 }
1771 if (write) {
1772 entry = pud_mkyoung(pud_mkdirty(entry));
1773 entry = maybe_pud_mkwrite(entry, vma);
1774 }
1775 set_pud_at(mm, addr, pud, entry);
1776 update_mmu_cache_pud(vma, addr, pud);
1777 out_unlock:
1778 spin_unlock(ptl);
1779 return VM_FAULT_NOPAGE;
1780 }
1781
1782 /**
1783 * vmf_insert_pfn_pud - insert a pud size pfn
1784 * @vmf: Structure describing the fault
1785 * @pfn: pfn to insert
1786 * @write: whether it's a write fault
1787 *
1788 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1789 *
1790 * Return: vm_fault_t value.
1791 */
1792 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
1793 bool write)
1794 {
1795 unsigned long addr = vmf->address & PUD_MASK;
1796 struct vm_area_struct *vma = vmf->vma;
1797 pgprot_t pgprot = vma->vm_page_prot;
1798 struct folio_or_pfn fop = {
1799 .pfn = pfn,
1800 };
1801
1802 /*
1803 * If we had pud_special, we could avoid all these restrictions,
1804 * but we need to be consistent with PTEs and architectures that
1805 * can't support a 'special' bit.
1806 */
1807 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1808 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1809 (VM_PFNMAP|VM_MIXEDMAP));
1810 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1811
1812 pfnmap_setup_cachemode_pfn(pfn, &pgprot);
1813
1814 return insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
1815 }
1816 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1817
1818 /**
1819 * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry
1820 * @vmf: Structure describing the fault
1821 * @folio: folio to insert
1822 * @write: whether it's a write fault
1823 *
1824 * Return: vm_fault_t value.
1825 */
1826 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
1827 bool write)
1828 {
1829 struct vm_area_struct *vma = vmf->vma;
1830 unsigned long addr = vmf->address & PUD_MASK;
1831 struct folio_or_pfn fop = {
1832 .folio = folio,
1833 .is_folio = true,
1834 };
1835
1836 if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
1837 return VM_FAULT_SIGBUS;
1838
1839 return insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
1840 }
1841 EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
1842 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1843
1844 /**
1845 * touch_pmd - Mark page table pmd entry as accessed and dirty (for write)
1846 * @vma: The VMA covering @addr
1847 * @addr: The virtual address
1848 * @pmd: pmd pointer into the page table mapping @addr
1849 * @write: Whether it's a write access
1850 *
1851 * Return: whether the pmd entry is changed
1852 */
1853 bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1854 pmd_t *pmd, bool write)
1855 {
1856 pmd_t entry;
1857
1858 entry = pmd_mkyoung(*pmd);
1859 if (write)
1860 entry = pmd_mkdirty(entry);
1861 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1862 pmd, entry, write)) {
1863 update_mmu_cache_pmd(vma, addr, pmd);
1864 return true;
1865 }
1866
1867 return false;
1868 }
1869
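/*
 * Called from copy_huge_pmd() to copy a non-present (migration or
 * device-private) huge PMD into the child: writable entries are first
 * downgraded to readable ones in the source, device-private folios get
 * their rmap duplicated, and the child receives its own copy.
 */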
1870 static void copy_huge_non_present_pmd(
1871 struct mm_struct *dst_mm, struct mm_struct *src_mm,
1872 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1873 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1874 pmd_t pmd, pgtable_t pgtable)
1875 {
1876 softleaf_t entry = softleaf_from_pmd(pmd);
1877 struct folio *src_folio;
1878
1879 VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(pmd));
1880
1881 if (softleaf_is_migration_write(entry) ||
1882 softleaf_is_migration_read_exclusive(entry)) {
1883 entry = make_readable_migration_entry(swp_offset(entry));
1884 pmd = swp_entry_to_pmd(entry);
1885 if (pmd_swp_soft_dirty(*src_pmd))
1886 pmd = pmd_swp_mksoft_dirty(pmd);
1887 if (pmd_swp_uffd_wp(*src_pmd))
1888 pmd = pmd_swp_mkuffd_wp(pmd);
1889 set_pmd_at(src_mm, addr, src_pmd, pmd);
1890 } else if (softleaf_is_device_private(entry)) {
1891 /*
1892 * For device private entries, since there are no
1893 * read exclusive entries, writable = !readable
1894 */
1895 if (softleaf_is_device_private_write(entry)) {
1896 entry = make_readable_device_private_entry(swp_offset(entry));
1897 pmd = swp_entry_to_pmd(entry);
1898
1899 if (pmd_swp_soft_dirty(*src_pmd))
1900 pmd = pmd_swp_mksoft_dirty(pmd);
1901 if (pmd_swp_uffd_wp(*src_pmd))
1902 pmd = pmd_swp_mkuffd_wp(pmd);
1903 set_pmd_at(src_mm, addr, src_pmd, pmd);
1904 }
1905
1906 src_folio = softleaf_to_folio(entry);
1907 VM_WARN_ON(!folio_test_large(src_folio));
1908
1909 folio_get(src_folio);
1910 /*
1911 * folio_try_dup_anon_rmap_pmd does not fail for
1912 * device private entries.
1913 */
1914 folio_try_dup_anon_rmap_pmd(src_folio, &src_folio->page,
1915 dst_vma, src_vma);
1916 }
1917
1918 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1919 mm_inc_nr_ptes(dst_mm);
1920 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1921 if (!userfaultfd_wp(dst_vma))
1922 pmd = pmd_swp_clear_uffd_wp(pmd);
1923 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1924 }
1925
1926 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1927 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1928 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1929 {
1930 spinlock_t *dst_ptl, *src_ptl;
1931 struct page *src_page;
1932 struct folio *src_folio;
1933 pmd_t pmd;
1934 pgtable_t pgtable = NULL;
1935 int ret = -ENOMEM;
1936
1937 pmd = pmdp_get_lockless(src_pmd);
1938 if (unlikely(pmd_present(pmd) && pmd_special(pmd) &&
1939 !is_huge_zero_pmd(pmd))) {
1940 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1941 src_ptl = pmd_lockptr(src_mm, src_pmd);
1942 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1943 /*
1944 * No need to recheck the pmd, it can't change with write
1945 * mmap lock held here.
1946 *
1947 * Meanwhile, make sure it's not a CoW VMA with a writable
1948 * mapping; otherwise either the anon page wrongly had the
1949 * special bit applied, or we wrongly made the PRIVATE mapping
1950 * able to write to the backend MMIO.
1951 */
1952 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
1953 goto set_pmd;
1954 }
1955
1956 /* Skip if it can be re-filled on fault */
1957 if (!vma_is_anonymous(dst_vma))
1958 return 0;
1959
1960 pgtable = pte_alloc_one(dst_mm);
1961 if (unlikely(!pgtable))
1962 goto out;
1963
1964 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1965 src_ptl = pmd_lockptr(src_mm, src_pmd);
1966 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1967
1968 ret = -EAGAIN;
1969 pmd = *src_pmd;
1970
1971 if (unlikely(thp_migration_supported() &&
1972 pmd_is_valid_softleaf(pmd))) {
1973 copy_huge_non_present_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr,
1974 dst_vma, src_vma, pmd, pgtable);
1975 ret = 0;
1976 goto out_unlock;
1977 }
1978
1979 if (unlikely(!pmd_trans_huge(pmd))) {
1980 pte_free(dst_mm, pgtable);
1981 goto out_unlock;
1982 }
1983 /*
1984 * When the page table lock is held, the huge zero pmd should not be
1985 * under splitting, since we don't split the page itself, only the pmd
1986 * into a page table.
1987 */
1988 if (is_huge_zero_pmd(pmd)) {
1989 /*
1990 * mm_get_huge_zero_folio() will never allocate a new
1991 * folio here, since we already have a zero page to
1992 * copy. It just takes a reference.
1993 */
1994 mm_get_huge_zero_folio(dst_mm);
1995 goto out_zero_page;
1996 }
1997
1998 src_page = pmd_page(pmd);
1999 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
2000 src_folio = page_folio(src_page);
2001
2002 folio_get(src_folio);
2003 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {
2004 /* Page may be pinned: split and retry the fault on PTEs. */
2005 folio_put(src_folio);
2006 pte_free(dst_mm, pgtable);
2007 spin_unlock(src_ptl);
2008 spin_unlock(dst_ptl);
2009 __split_huge_pmd(src_vma, src_pmd, addr, false);
2010 return -EAGAIN;
2011 }
2012 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
2013 out_zero_page:
2014 mm_inc_nr_ptes(dst_mm);
2015 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
2016 pmdp_set_wrprotect(src_mm, addr, src_pmd);
2017 if (!userfaultfd_wp(dst_vma))
2018 pmd = pmd_clear_uffd_wp(pmd);
2019 pmd = pmd_wrprotect(pmd);
2020 set_pmd:
2021 pmd = pmd_mkold(pmd);
2022 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
2023
2024 ret = 0;
2025 out_unlock:
2026 spin_unlock(src_ptl);
2027 spin_unlock(dst_ptl);
2028 out:
2029 return ret;
2030 }
2031
2032 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
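/*
 * PUD counterpart of touch_pmd(): mark the entry as accessed, and as
 * dirty for a write access.
 */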
2033 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
2034 pud_t *pud, bool write)
2035 {
2036 pud_t _pud;
2037
2038 _pud = pud_mkyoung(*pud);
2039 if (write)
2040 _pud = pud_mkdirty(_pud);
2041 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
2042 pud, _pud, write))
2043 update_mmu_cache_pud(vma, addr, pud);
2044 }
2045
2046 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
2047 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
2048 struct vm_area_struct *vma)
2049 {
2050 spinlock_t *dst_ptl, *src_ptl;
2051 pud_t pud;
2052 int ret;
2053
2054 dst_ptl = pud_lock(dst_mm, dst_pud);
2055 src_ptl = pud_lockptr(src_mm, src_pud);
2056 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2057
2058 ret = -EAGAIN;
2059 pud = *src_pud;
2060 if (unlikely(!pud_trans_huge(pud)))
2061 goto out_unlock;
2062
2063 /*
2064 * TODO: once we support anonymous pages, use
2065 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
2066 */
2067 if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
2068 pudp_set_wrprotect(src_mm, addr, src_pud);
2069 pud = pud_wrprotect(pud);
2070 }
2071 pud = pud_mkold(pud);
2072 set_pud_at(dst_mm, addr, dst_pud, pud);
2073
2074 ret = 0;
2075 out_unlock:
2076 spin_unlock(src_ptl);
2077 spin_unlock(dst_ptl);
2078 return ret;
2079 }
2080
2081 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
2082 {
2083 bool write = vmf->flags & FAULT_FLAG_WRITE;
2084
2085 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
2086 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
2087 goto unlock;
2088
2089 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
2090 unlock:
2091 spin_unlock(vmf->ptl);
2092 }
2093 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2094
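/*
 * Mark the huge PMD mapping vmf->address as accessed (and dirty for a
 * write). Returns false if the PMD no longer matches vmf->orig_pmd,
 * otherwise whether the entry was actually updated.
 */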
2095 bool huge_pmd_set_accessed(struct vm_fault *vmf)
2096 {
2097 bool write = vmf->flags & FAULT_FLAG_WRITE;
2098
2099 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
2100 return false;
2101
2102 return touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
2103 }
2104
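/*
 * Write fault on the huge zero page: allocate a fresh anonymous THP and
 * replace the zero-page PMD with it. Returns VM_FAULT_FALLBACK if the
 * allocation fails so the caller can split the PMD instead.
 */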
2105 static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
2106 {
2107 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2108 struct vm_area_struct *vma = vmf->vma;
2109 struct mmu_notifier_range range;
2110 struct folio *folio;
2111 vm_fault_t ret = 0;
2112
2113 folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
2114 if (unlikely(!folio))
2115 return VM_FAULT_FALLBACK;
2116
2117 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
2118 haddr + HPAGE_PMD_SIZE);
2119 mmu_notifier_invalidate_range_start(&range);
2120 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2121 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
2122 goto release;
2123 ret = check_stable_address_space(vma->vm_mm);
2124 if (ret)
2125 goto release;
2126 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
2127 map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
2128 goto unlock;
2129 release:
2130 folio_put(folio);
2131 unlock:
2132 spin_unlock(vmf->ptl);
2133 mmu_notifier_invalidate_range_end(&range);
2134 return ret;
2135 }
2136
2137 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
2138 {
2139 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
2140 struct vm_area_struct *vma = vmf->vma;
2141 struct folio *folio;
2142 struct page *page;
2143 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2144 pmd_t orig_pmd = vmf->orig_pmd;
2145
2146 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
2147 VM_BUG_ON_VMA(!vma->anon_vma, vma);
2148
2149 if (is_huge_zero_pmd(orig_pmd)) {
2150 vm_fault_t ret = do_huge_zero_wp_pmd(vmf);
2151
2152 if (!(ret & VM_FAULT_FALLBACK))
2153 return ret;
2154
2155 /* Fallback to splitting PMD if THP cannot be allocated */
2156 goto fallback;
2157 }
2158
2159 spin_lock(vmf->ptl);
2160
2161 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
2162 spin_unlock(vmf->ptl);
2163 return 0;
2164 }
2165
2166 page = pmd_page(orig_pmd);
2167 folio = page_folio(page);
2168 VM_BUG_ON_PAGE(!PageHead(page), page);
2169
2170 /* Early check when only holding the PT lock. */
2171 if (PageAnonExclusive(page))
2172 goto reuse;
2173
2174 if (!folio_trylock(folio)) {
2175 folio_get(folio);
2176 spin_unlock(vmf->ptl);
2177 folio_lock(folio);
2178 spin_lock(vmf->ptl);
2179 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
2180 spin_unlock(vmf->ptl);
2181 folio_unlock(folio);
2182 folio_put(folio);
2183 return 0;
2184 }
2185 folio_put(folio);
2186 }
2187
2188 /* Recheck after temporarily dropping the PT lock. */
2189 if (PageAnonExclusive(page)) {
2190 folio_unlock(folio);
2191 goto reuse;
2192 }
2193
2194 /*
2195 * See do_wp_page(): we can only reuse the folio exclusively if
2196 * there are no additional references. Note that we always drain
2197 * the LRU cache immediately after adding a THP.
2198 */
2199 if (folio_ref_count(folio) >
2200 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
2201 goto unlock_fallback;
2202 if (folio_test_swapcache(folio))
2203 folio_free_swap(folio);
2204 if (folio_ref_count(folio) == 1) {
2205 pmd_t entry;
2206
2207 folio_move_anon_rmap(folio, vma);
2208 SetPageAnonExclusive(page);
2209 folio_unlock(folio);
2210 reuse:
2211 if (unlikely(unshare)) {
2212 spin_unlock(vmf->ptl);
2213 return 0;
2214 }
2215 entry = pmd_mkyoung(orig_pmd);
2216 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2217 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
2218 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2219 spin_unlock(vmf->ptl);
2220 return 0;
2221 }
2222
2223 unlock_fallback:
2224 folio_unlock(folio);
2225 spin_unlock(vmf->ptl);
2226 fallback:
2227 __split_huge_pmd(vma, vmf->pmd, vmf->address, false);
2228 return VM_FAULT_FALLBACK;
2229 }
2230
2231 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
2232 unsigned long addr, pmd_t pmd)
2233 {
2234 struct page *page;
2235
2236 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
2237 return false;
2238
2239 /* Don't touch entries that are not even readable (NUMA hinting). */
2240 if (pmd_protnone(pmd))
2241 return false;
2242
2243 /* Do we need write faults for softdirty tracking? */
2244 if (pmd_needs_soft_dirty_wp(vma, pmd))
2245 return false;
2246
2247 /* Do we need write faults for uffd-wp tracking? */
2248 if (userfaultfd_huge_pmd_wp(vma, pmd))
2249 return false;
2250
2251 if (!(vma->vm_flags & VM_SHARED)) {
2252 /* See can_change_pte_writable(). */
2253 page = vm_normal_page_pmd(vma, addr, pmd);
2254 return page && PageAnon(page) && PageAnonExclusive(page);
2255 }
2256
2257 /* See can_change_pte_writable(). */
2258 return pmd_dirty(pmd);
2259 }
2260
2261 /* NUMA hinting page fault entry point for trans huge pmds */
2262 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
2263 {
2264 struct vm_area_struct *vma = vmf->vma;
2265 struct folio *folio;
2266 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2267 int nid = NUMA_NO_NODE;
2268 int target_nid, last_cpupid;
2269 pmd_t pmd, old_pmd;
2270 bool writable = false;
2271 int flags = 0;
2272
2273 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2274 old_pmd = pmdp_get(vmf->pmd);
2275
2276 if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
2277 spin_unlock(vmf->ptl);
2278 return 0;
2279 }
2280
2281 pmd = pmd_modify(old_pmd, vma->vm_page_prot);
2282
2283 /*
2284 * Detect now whether the PMD could be writable; this information
2285 * is only valid while holding the PT lock.
2286 */
2287 writable = pmd_write(pmd);
2288 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
2289 can_change_pmd_writable(vma, vmf->address, pmd))
2290 writable = true;
2291
2292 folio = vm_normal_folio_pmd(vma, haddr, pmd);
2293 if (!folio)
2294 goto out_map;
2295
2296 nid = folio_nid(folio);
2297
2298 target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
2299 &last_cpupid);
2300 if (target_nid == NUMA_NO_NODE)
2301 goto out_map;
2302 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
2303 flags |= TNF_MIGRATE_FAIL;
2304 goto out_map;
2305 }
2306 /* The folio is isolated and isolation code holds a folio reference. */
2307 spin_unlock(vmf->ptl);
2308 writable = false;
2309
2310 if (!migrate_misplaced_folio(folio, target_nid)) {
2311 flags |= TNF_MIGRATED;
2312 nid = target_nid;
2313 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2314 return 0;
2315 }
2316
2317 flags |= TNF_MIGRATE_FAIL;
2318 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2319 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
2320 spin_unlock(vmf->ptl);
2321 return 0;
2322 }
2323 out_map:
2324 /* Restore the PMD */
2325 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
2326 pmd = pmd_mkyoung(pmd);
2327 if (writable)
2328 pmd = pmd_mkwrite(pmd, vma);
2329 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
2330 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2331 spin_unlock(vmf->ptl);
2332
2333 if (nid != NUMA_NO_NODE)
2334 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2335 return 0;
2336 }
2337
2338 /*
2339 * Return true if we did MADV_FREE successfully on the entire pmd page.
2340 * Otherwise, return false.
2341 */
2342 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2343 pmd_t *pmd, unsigned long addr, unsigned long next)
2344 {
2345 spinlock_t *ptl;
2346 pmd_t orig_pmd;
2347 struct folio *folio;
2348 struct mm_struct *mm = tlb->mm;
2349 bool ret = false;
2350
2351 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2352
2353 ptl = pmd_trans_huge_lock(pmd, vma);
2354 if (!ptl)
2355 goto out_unlocked;
2356
2357 orig_pmd = *pmd;
2358 if (is_huge_zero_pmd(orig_pmd))
2359 goto out;
2360
2361 if (unlikely(!pmd_present(orig_pmd))) {
2362 VM_BUG_ON(thp_migration_supported() &&
2363 !pmd_is_migration_entry(orig_pmd));
2364 goto out;
2365 }
2366
2367 folio = pmd_folio(orig_pmd);
2368 /*
2369 * If other processes are mapping this folio, we can't discard
2370 * the folio unless they all do MADV_FREE, so skip the folio.
2371 */
2372 if (folio_maybe_mapped_shared(folio))
2373 goto out;
2374
2375 if (!folio_trylock(folio))
2376 goto out;
2377
2378 /*
2379 * If the user wants to discard only part of the THP's pages, split it
2380 * so MADV_FREE will deactivate just those pages.
2381 */
2382 if (next - addr != HPAGE_PMD_SIZE) {
2383 folio_get(folio);
2384 spin_unlock(ptl);
2385 split_folio(folio);
2386 folio_unlock(folio);
2387 folio_put(folio);
2388 goto out_unlocked;
2389 }
2390
2391 if (folio_test_dirty(folio))
2392 folio_clear_dirty(folio);
2393 folio_unlock(folio);
2394
2395 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
2396 pmdp_invalidate(vma, addr, pmd);
2397 orig_pmd = pmd_mkold(orig_pmd);
2398 orig_pmd = pmd_mkclean(orig_pmd);
2399
2400 set_pmd_at(mm, addr, pmd, orig_pmd);
2401 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2402 }
2403
2404 folio_mark_lazyfree(folio);
2405 ret = true;
2406 out:
2407 spin_unlock(ptl);
2408 out_unlocked:
2409 return ret;
2410 }
2411
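/* Withdraw the page table deposited behind a huge PMD and free it. */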
2412 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
2413 {
2414 pgtable_t pgtable;
2415
2416 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2417 pte_free(mm, pgtable);
2418 mm_dec_nr_ptes(mm);
2419 }
2420
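/*
 * Unaccount a folio that was mapped by a just-zapped huge PMD: drop its
 * rmap, adjust the mm counters and, for device-private folios, drop the
 * reference that pinned them.
 */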
2421 static void zap_huge_pmd_folio(struct mm_struct *mm, struct vm_area_struct *vma,
2422 pmd_t pmdval, struct folio *folio, bool is_present)
2423 {
2424 const bool is_device_private = folio_is_device_private(folio);
2425
2426 /* Present and device private folios are rmappable. */
2427 if (is_present || is_device_private)
2428 folio_remove_rmap_pmd(folio, &folio->page, vma);
2429
2430 if (folio_test_anon(folio)) {
2431 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
2432 } else {
2433 add_mm_counter(mm, mm_counter_file(folio),
2434 -HPAGE_PMD_NR);
2435
2436 if (is_present && pmd_young(pmdval) &&
2437 likely(vma_has_recency(vma)))
2438 folio_mark_accessed(folio);
2439 }
2440
2441 /* Device private folios are pinned. */
2442 if (is_device_private)
2443 folio_put(folio);
2444 }
2445
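/*
 * Look up the folio behind a huge PMD: via vm_normal_folio_pmd() for
 * present entries, or via the softleaf (migration/device-private) entry
 * otherwise.
 */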
2446 static struct folio *normal_or_softleaf_folio_pmd(struct vm_area_struct *vma,
2447 unsigned long addr, pmd_t pmdval, bool is_present)
2448 {
2449 if (is_present)
2450 return vm_normal_folio_pmd(vma, addr, pmdval);
2451
2452 if (!thp_migration_supported())
2453 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
2454 return pmd_to_softleaf_folio(pmdval);
2455 }
2456
2457 static bool has_deposited_pgtable(struct vm_area_struct *vma, pmd_t pmdval,
2458 struct folio *folio)
2459 {
2460 /* Some architectures require unconditional depositing. */
2461 if (arch_needs_pgtable_deposit())
2462 return true;
2463
2464 /*
2465 * The huge zero folio always has a deposited page table, except for
2466 * DAX, which handles this itself; see set_huge_zero_folio().
2467 */
2468 if (is_huge_zero_pmd(pmdval))
2469 return !vma_is_dax(vma);
2470
2471 /*
2472 * Otherwise, only anonymous folios are deposited, see
2473 * __do_huge_pmd_anonymous_page().
2474 */
2475 return folio && folio_test_anon(folio);
2476 }
2477
2478 /**
2479 * zap_huge_pmd - Zap a huge THP which is of PMD size.
2480 * @tlb: The MMU gather TLB state associated with the operation.
2481 * @vma: The VMA containing the range to zap.
2482 * @pmd: A pointer to the leaf PMD entry.
2483 * @addr: The virtual address for the range to zap.
2484 *
2485 * Returns: %true on success, %false otherwise.
2486 */
2487 bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2488 pmd_t *pmd, unsigned long addr)
2489 {
2490 struct mm_struct *mm = tlb->mm;
2491 struct folio *folio = NULL;
2492 bool is_present = false;
2493 bool has_deposit;
2494 spinlock_t *ptl;
2495 pmd_t orig_pmd;
2496
2497 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2498
2499 ptl = __pmd_trans_huge_lock(pmd, vma);
2500 if (!ptl)
2501 return false;
2502 /*
2503 * For architectures like ppc64 we look at deposited pgtable
2504 * when calling pmdp_huge_get_and_clear. So do the
2505 * pgtable_trans_huge_withdraw after finishing pmdp related
2506 * operations.
2507 */
2508 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
2509 tlb->fullmm);
2510 arch_check_zapped_pmd(vma, orig_pmd);
2511 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2512
2513 is_present = pmd_present(orig_pmd);
2514 folio = normal_or_softleaf_folio_pmd(vma, addr, orig_pmd, is_present);
2515 has_deposit = has_deposited_pgtable(vma, orig_pmd, folio);
2516 if (folio)
2517 zap_huge_pmd_folio(mm, vma, orig_pmd, folio, is_present);
2518 if (has_deposit)
2519 zap_deposited_table(mm, pmd);
2520
2521 spin_unlock(ptl);
2522 if (is_present && folio)
2523 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
2524 return true;
2525 }
2526
2527 #ifndef pmd_move_must_withdraw
2528 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
2529 spinlock_t *old_pmd_ptl,
2530 struct vm_area_struct *vma)
2531 {
2532 /*
2533 * With split pmd lock we also need to move preallocated
2534 * PTE page table if new_pmd is on different PMD page table.
2535 *
2536 * We also don't deposit and withdraw tables for file pages.
2537 */
2538 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
2539 }
2540 #endif
2541
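/*
 * Preserve soft-dirty semantics for a PMD that is being moved: the entry
 * at its new address must appear written since the last soft-dirty clear.
 */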
2542 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
2543 {
2544 if (pgtable_supports_soft_dirty()) {
2545 if (unlikely(pmd_is_migration_entry(pmd)))
2546 pmd = pmd_swp_mksoft_dirty(pmd);
2547 else if (pmd_present(pmd))
2548 pmd = pmd_mksoft_dirty(pmd);
2549 }
2550
2551 return pmd;
2552 }
2553
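/*
 * Clear the uffd-wp bit on a moved PMD, handling both the present and
 * the swap (softleaf) entry formats.
 */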
2554 static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
2555 {
2556 if (pmd_none(pmd))
2557 return pmd;
2558 if (pmd_present(pmd))
2559 pmd = pmd_clear_uffd_wp(pmd);
2560 else
2561 pmd = pmd_swp_clear_uffd_wp(pmd);
2562
2563 return pmd;
2564 }
2565
2566 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
2567 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
2568 {
2569 spinlock_t *old_ptl, *new_ptl;
2570 pmd_t pmd;
2571 struct mm_struct *mm = vma->vm_mm;
2572 bool force_flush = false;
2573
2574 /*
2575 * The destination pmd shouldn't be established, free_pgtables()
2576 * should have released it; but move_page_tables() might have already
2577 * inserted a page table, if racing against shmem/file collapse.
2578 */
2579 if (!pmd_none(*new_pmd)) {
2580 VM_BUG_ON(pmd_trans_huge(*new_pmd));
2581 return false;
2582 }
2583
2584 /*
2585 * We don't have to worry about the ordering of src and dst
2586 * ptlocks because exclusive mmap_lock prevents deadlock.
2587 */
2588 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
2589 if (old_ptl) {
2590 new_ptl = pmd_lockptr(mm, new_pmd);
2591 if (new_ptl != old_ptl)
2592 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2593 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
2594 if (pmd_present(pmd))
2595 force_flush = true;
2596 VM_BUG_ON(!pmd_none(*new_pmd));
2597
2598 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
2599 pgtable_t pgtable;
2600 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2601 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
2602 }
2603 pmd = move_soft_dirty_pmd(pmd);
2604 if (vma_has_uffd_without_event_remap(vma))
2605 pmd = clear_uffd_wp_pmd(pmd);
2606 set_pmd_at(mm, new_addr, new_pmd, pmd);
2607 if (force_flush)
2608 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
2609 if (new_ptl != old_ptl)
2610 spin_unlock(new_ptl);
2611 spin_unlock(old_ptl);
2612 return true;
2613 }
2614 return false;
2615 }
2616
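/*
 * Protection change on a non-present (softleaf) huge PMD: downgrade
 * writable migration and device-private entries to readable ones and
 * update the uffd-wp bit as requested.
 */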
2617 static void change_non_present_huge_pmd(struct mm_struct *mm,
2618 unsigned long addr, pmd_t *pmd, bool uffd_wp,
2619 bool uffd_wp_resolve)
2620 {
2621 softleaf_t entry = softleaf_from_pmd(*pmd);
2622 const struct folio *folio = softleaf_to_folio(entry);
2623 pmd_t newpmd;
2624
2625 VM_WARN_ON(!pmd_is_valid_softleaf(*pmd));
2626 if (softleaf_is_migration_write(entry)) {
2627 /*
2628 * A protection check is difficult so
2629 * just be safe and disable write
2630 */
2631 if (folio_test_anon(folio))
2632 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2633 else
2634 entry = make_readable_migration_entry(swp_offset(entry));
2635 newpmd = swp_entry_to_pmd(entry);
2636 if (pmd_swp_soft_dirty(*pmd))
2637 newpmd = pmd_swp_mksoft_dirty(newpmd);
2638 } else if (softleaf_is_device_private_write(entry)) {
2639 entry = make_readable_device_private_entry(swp_offset(entry));
2640 newpmd = swp_entry_to_pmd(entry);
2641 } else {
2642 newpmd = *pmd;
2643 }
2644
2645 if (uffd_wp)
2646 newpmd = pmd_swp_mkuffd_wp(newpmd);
2647 else if (uffd_wp_resolve)
2648 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2649 if (!pmd_same(*pmd, newpmd))
2650 set_pmd_at(mm, addr, pmd, newpmd);
2651 }
2652
2653 /*
2654 * Returns
2655 * - 0 if PMD could not be locked
2656 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2657 * or if prot_numa but THP migration is not supported
2658 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
2659 */
2660 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2661 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2662 unsigned long cp_flags)
2663 {
2664 struct mm_struct *mm = vma->vm_mm;
2665 spinlock_t *ptl;
2666 pmd_t oldpmd, entry;
2667 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
2668 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2669 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2670 int ret = 1;
2671
2672 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2673
2674 if (prot_numa && !thp_migration_supported())
2675 return 1;
2676
2677 ptl = __pmd_trans_huge_lock(pmd, vma);
2678 if (!ptl)
2679 return 0;
2680
2681 if (thp_migration_supported() && pmd_is_valid_softleaf(*pmd)) {
2682 change_non_present_huge_pmd(mm, addr, pmd, uffd_wp,
2683 uffd_wp_resolve);
2684 goto unlock;
2685 }
2686
2687 if (prot_numa) {
2688
2689 /*
2690 * Avoid trapping faults against the zero page. The read-only
2691 * data is likely to be read-cached on the local CPU and
2692 * local/remote hits to the zero page are not interesting.
2693 */
2694 if (is_huge_zero_pmd(*pmd))
2695 goto unlock;
2696
2697 if (pmd_protnone(*pmd))
2698 goto unlock;
2699
2700 if (!folio_can_map_prot_numa(pmd_folio(*pmd), vma,
2701 vma_is_single_threaded_private(vma)))
2702 goto unlock;
2703 }
2704 /*
2705 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
2706 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
2707 * which is also under mmap_read_lock(mm):
2708 *
2709 * CPU0: CPU1:
2710 * change_huge_pmd(prot_numa=1)
2711 * pmdp_huge_get_and_clear_notify()
2712 * madvise_dontneed()
2713 * zap_pmd_range()
2714 * pmd_trans_huge(*pmd) == 0 (without ptl)
2715 * // skip the pmd
2716 * set_pmd_at();
2717 * // pmd is re-established
2718 *
2719 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
2720 * which may break userspace.
2721 *
2722 * pmdp_invalidate_ad() is required to make sure we don't miss
2723 * dirty/young flags set by hardware.
2724 */
2725 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2726
2727 entry = pmd_modify(oldpmd, newprot);
2728 if (uffd_wp)
2729 entry = pmd_mkuffd_wp(entry);
2730 else if (uffd_wp_resolve)
2731 /*
2732 * Leave the write bit to be handled by the page fault
2733 * handler, so that things like COW can be handled
2734 * properly.
2735 */
2736 entry = pmd_clear_uffd_wp(entry);
2737
2738 /* See change_pte_range(). */
2739 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2740 can_change_pmd_writable(vma, addr, entry))
2741 entry = pmd_mkwrite(entry, vma);
2742
2743 ret = HPAGE_PMD_NR;
2744 set_pmd_at(mm, addr, pmd, entry);
2745
2746 if (huge_pmd_needs_flush(oldpmd, entry))
2747 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
2748 unlock:
2749 spin_unlock(ptl);
2750 return ret;
2751 }
2752
2753 /*
2754 * Returns:
2755 *
2756 * - 0: if pud leaf changed from under us
2757 * - 1: if pud can be skipped
2758 * - HPAGE_PUD_NR: if pud was successfully processed
2759 */
2760 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2761 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2762 pud_t *pudp, unsigned long addr, pgprot_t newprot,
2763 unsigned long cp_flags)
2764 {
2765 struct mm_struct *mm = vma->vm_mm;
2766 pud_t oldpud, entry;
2767 spinlock_t *ptl;
2768
2769 tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
2770
2771 /* NUMA balancing doesn't apply to dax */
2772 if (cp_flags & MM_CP_PROT_NUMA)
2773 return 1;
2774
2775 /*
2776 * Huge entries on userfault-wp only work with anonymous memory, while
2777 * we don't have anonymous PUDs yet.
2778 */
2779 if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
2780 return 1;
2781
2782 ptl = __pud_trans_huge_lock(pudp, vma);
2783 if (!ptl)
2784 return 0;
2785
2786 /*
2787 * Can't clear PUD or it can race with concurrent zapping. See
2788 * change_huge_pmd().
2789 */
2790 oldpud = pudp_invalidate(vma, addr, pudp);
2791 entry = pud_modify(oldpud, newprot);
2792 set_pud_at(mm, addr, pudp, entry);
2793 tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
2794
2795 spin_unlock(ptl);
2796 return HPAGE_PUD_NR;
2797 }
2798 #endif
2799
2800 #ifdef CONFIG_USERFAULTFD
2801 /*
2802 * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
2803 * the caller, but it must return after releasing the page_table_lock.
2804 * Just move the page from src_pmd to dst_pmd if possible.
2805 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
2806 * repeated by the caller, or other errors in case of failure.
2807 */
2808 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2809 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2810 unsigned long dst_addr, unsigned long src_addr)
2811 {
2812 pmd_t _dst_pmd, src_pmdval;
2813 struct page *src_page;
2814 struct folio *src_folio;
2815 spinlock_t *src_ptl, *dst_ptl;
2816 pgtable_t src_pgtable;
2817 struct mmu_notifier_range range;
2818 int err = 0;
2819
2820 src_pmdval = *src_pmd;
2821 src_ptl = pmd_lockptr(mm, src_pmd);
2822
2823 lockdep_assert_held(src_ptl);
2824 vma_assert_locked(src_vma);
2825 vma_assert_locked(dst_vma);
2826
2827 /* Sanity checks before the operation */
2828 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2829 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2830 spin_unlock(src_ptl);
2831 return -EINVAL;
2832 }
2833
2834 if (!pmd_trans_huge(src_pmdval)) {
2835 spin_unlock(src_ptl);
2836 if (pmd_is_migration_entry(src_pmdval)) {
2837 pmd_migration_entry_wait(mm, &src_pmdval);
2838 return -EAGAIN;
2839 }
2840 return -ENOENT;
2841 }
2842
2843 src_page = pmd_page(src_pmdval);
2844
2845 if (!is_huge_zero_pmd(src_pmdval)) {
2846 if (unlikely(!PageAnonExclusive(src_page))) {
2847 spin_unlock(src_ptl);
2848 return -EBUSY;
2849 }
2850
2851 src_folio = page_folio(src_page);
2852 folio_get(src_folio);
2853 } else
2854 src_folio = NULL;
2855
2856 spin_unlock(src_ptl);
2857
2858 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2859 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2860 src_addr + HPAGE_PMD_SIZE);
2861 mmu_notifier_invalidate_range_start(&range);
2862
2863 if (src_folio)
2864 folio_lock(src_folio);
2865
2866 dst_ptl = pmd_lockptr(mm, dst_pmd);
2867 double_pt_lock(src_ptl, dst_ptl);
2868 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2869 !pmd_same(*dst_pmd, dst_pmdval))) {
2870 err = -EAGAIN;
2871 goto unlock_ptls;
2872 }
2873 if (src_folio) {
2874 if (folio_maybe_dma_pinned(src_folio) ||
2875 !PageAnonExclusive(&src_folio->page)) {
2876 err = -EBUSY;
2877 goto unlock_ptls;
2878 }
2879
2880 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2881 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2882 err = -EBUSY;
2883 goto unlock_ptls;
2884 }
2885
2886 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2887 /* Folio got pinned from under us. Put it back and fail the move. */
2888 if (folio_maybe_dma_pinned(src_folio)) {
2889 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2890 err = -EBUSY;
2891 goto unlock_ptls;
2892 }
2893
2894 folio_move_anon_rmap(src_folio, dst_vma);
2895 src_folio->index = linear_page_index(dst_vma, dst_addr);
2896
2897 _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
2898 /* Follow mremap() behavior and treat the entry as dirty after the move */
2899 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2900 } else {
2901 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2902 _dst_pmd = move_soft_dirty_pmd(src_pmdval);
2903 _dst_pmd = clear_uffd_wp_pmd(_dst_pmd);
2904 }
2905 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2906
2907 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2908 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2909 unlock_ptls:
2910 double_pt_unlock(src_ptl, dst_ptl);
2911 /* unblock rmap walks */
2912 if (src_folio)
2913 folio_unlock(src_folio);
2914 mmu_notifier_invalidate_range_end(&range);
2915 if (src_folio)
2916 folio_put(src_folio);
2917 return err;
2918 }
2919 #endif /* CONFIG_USERFAULTFD */
2920
2921 /*
2922 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
2923 *
2924 * Note that if it returns the page table lock pointer, this routine
2925 * returns without unlocking it, so callers must unlock it.
2926 */
2927 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2928 {
2929 spinlock_t *ptl;
2930
2931 ptl = pmd_lock(vma->vm_mm, pmd);
2932 if (likely(pmd_is_huge(*pmd)))
2933 return ptl;
2934 spin_unlock(ptl);
2935 return NULL;
2936 }
2937
2938 /*
2939 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
2940 *
2941 * Note that if it returns the page table lock pointer, this routine
2942 * returns without unlocking it, so callers must unlock it.
2943 */
2944 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2945 {
2946 spinlock_t *ptl;
2947
2948 ptl = pud_lock(vma->vm_mm, pud);
2949 if (likely(pud_trans_huge(*pud)))
2950 return ptl;
2951 spin_unlock(ptl);
2952 return NULL;
2953 }
2954
2955 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2956 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2957 pud_t *pud, unsigned long addr)
2958 {
2959 spinlock_t *ptl;
2960 pud_t orig_pud;
2961
2962 ptl = __pud_trans_huge_lock(pud, vma);
2963 if (!ptl)
2964 return 0;
2965
2966 orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2967 arch_check_zapped_pud(vma, orig_pud);
2968 tlb_remove_pud_tlb_entry(tlb, pud, addr);
2969 if (vma_is_special_huge(vma)) {
2970 spin_unlock(ptl);
2971 /* No zero page support yet */
2972 } else {
2973 struct page *page = NULL;
2974 struct folio *folio;
2975
2976 /* No support for anonymous PUD pages or migration yet */
2977 VM_WARN_ON_ONCE(vma_is_anonymous(vma) ||
2978 !pud_present(orig_pud));
2979
2980 page = pud_page(orig_pud);
2981 folio = page_folio(page);
2982 folio_remove_rmap_pud(folio, page, vma);
2983 add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR);
2984
2985 spin_unlock(ptl);
2986 tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE);
2987 }
2988 return 1;
2989 }
2990
2991 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2992 unsigned long haddr)
2993 {
2994 struct folio *folio;
2995 struct page *page;
2996 pud_t old_pud;
2997
2998 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2999 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
3000 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
3001 VM_BUG_ON(!pud_trans_huge(*pud));
3002
3003 count_vm_event(THP_SPLIT_PUD);
3004
3005 old_pud = pudp_huge_clear_flush(vma, haddr, pud);
3006
3007 if (!vma_is_dax(vma))
3008 return;
3009
3010 page = pud_page(old_pud);
3011 folio = page_folio(page);
3012
3013 if (!folio_test_dirty(folio) && pud_dirty(old_pud))
3014 folio_mark_dirty(folio);
3015 if (!folio_test_referenced(folio) && pud_young(old_pud))
3016 folio_set_referenced(folio);
3017 folio_remove_rmap_pud(folio, page, vma);
3018 folio_put(folio);
3019 add_mm_counter(vma->vm_mm, mm_counter_file(folio),
3020 -HPAGE_PUD_NR);
3021 }
3022
3023 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
3024 unsigned long address)
3025 {
3026 spinlock_t *ptl;
3027 struct mmu_notifier_range range;
3028
3029 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
3030 address & HPAGE_PUD_MASK,
3031 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
3032 mmu_notifier_invalidate_range_start(&range);
3033 ptl = pud_lock(vma->vm_mm, pud);
3034 if (unlikely(!pud_trans_huge(*pud)))
3035 goto out;
3036 __split_huge_pud_locked(vma, pud, range.start);
3037
3038 out:
3039 spin_unlock(ptl);
3040 mmu_notifier_invalidate_range_end(&range);
3041 }
3042 #else
3043 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
3044 unsigned long address)
3045 {
3046 }
3047 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
3048
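/*
 * Split a huge zero-page PMD in place: withdraw the deposited page table
 * and populate it with write-protected zero-page PTEs.
 */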
3049 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
3050 unsigned long haddr, pmd_t *pmd)
3051 {
3052 struct mm_struct *mm = vma->vm_mm;
3053 pgtable_t pgtable;
3054 pmd_t _pmd, old_pmd;
3055 unsigned long addr;
3056 pte_t *pte;
3057 int i;
3058
3059 /*
3060 * Leave the pmd empty until the ptes are filled. Note that it is fine
3061 * to delay notification until mmu_notifier_invalidate_range_end(), as
3062 * we are replacing a write-protected huge zero page with
3063 * write-protected zero ptes.
3064 *
3065 * See Documentation/mm/mmu_notifier.rst
3066 */
3067 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
3068
3069 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
3070 pmd_populate(mm, &_pmd, pgtable);
3071
3072 pte = pte_offset_map(&_pmd, haddr);
3073 VM_BUG_ON(!pte);
3074 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3075 pte_t entry;
3076
3077 entry = pfn_pte(zero_pfn(addr), vma->vm_page_prot);
3078 entry = pte_mkspecial(entry);
3079 if (pmd_uffd_wp(old_pmd))
3080 entry = pte_mkuffd_wp(entry);
3081 VM_BUG_ON(!pte_none(ptep_get(pte)));
3082 set_pte_at(mm, addr, pte, entry);
3083 pte++;
3084 }
3085 pte_unmap(pte - 1);
3086 smp_wmb(); /* make pte visible before pmd */
3087 pmd_populate(mm, pmd, pgtable);
3088 }
3089
3090 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
3091 unsigned long haddr, bool freeze)
3092 {
3093 struct mm_struct *mm = vma->vm_mm;
3094 struct folio *folio;
3095 struct page *page;
3096 pgtable_t pgtable;
3097 pmd_t old_pmd, _pmd;
3098 bool soft_dirty, uffd_wp = false, young = false, write = false;
3099 bool anon_exclusive = false, dirty = false;
3100 unsigned long addr;
3101 pte_t *pte;
3102 int i;
3103
3104 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
3105 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
3106 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
3107
3108 VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd));
3109
3110 count_vm_event(THP_SPLIT_PMD);
3111
3112 if (!vma_is_anonymous(vma)) {
3113 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
3114 /*
3115 * We are going to unmap this huge page. So
3116 * just go ahead and zap it
3117 */
3118 if (arch_needs_pgtable_deposit())
3119 zap_deposited_table(mm, pmd);
3120 if (vma_is_special_huge(vma))
3121 return;
3122 if (unlikely(pmd_is_migration_entry(old_pmd))) {
3123 const softleaf_t old_entry = softleaf_from_pmd(old_pmd);
3124
3125 folio = softleaf_to_folio(old_entry);
3126 } else if (is_huge_zero_pmd(old_pmd)) {
3127 return;
3128 } else {
3129 page = pmd_page(old_pmd);
3130 folio = page_folio(page);
3131 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
3132 folio_mark_dirty(folio);
3133 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
3134 folio_set_referenced(folio);
3135 folio_remove_rmap_pmd(folio, page, vma);
3136 folio_put(folio);
3137 }
3138 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
3139 return;
3140 }
3141
3142 if (is_huge_zero_pmd(*pmd)) {
3143 /*
3144 * FIXME: Do we want to invalidate the secondary mmu by calling
3145 * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments
3146 * inside __split_huge_pmd().
3147 *
3148 * We are going from a write-protected huge zero page to
3149 * write-protected small zero pages, so it does not seem useful
3150 * to invalidate the secondary mmu at this time.
3151 */
3152 return __split_huge_zero_page_pmd(vma, haddr, pmd);
3153 }
3154
3155 if (pmd_is_migration_entry(*pmd)) {
3156 softleaf_t entry;
3157
3158 old_pmd = *pmd;
3159 entry = softleaf_from_pmd(old_pmd);
3160 page = softleaf_to_page(entry);
3161 folio = page_folio(page);
3162
3163 soft_dirty = pmd_swp_soft_dirty(old_pmd);
3164 uffd_wp = pmd_swp_uffd_wp(old_pmd);
3165
3166 write = softleaf_is_migration_write(entry);
3167 if (PageAnon(page))
3168 anon_exclusive = softleaf_is_migration_read_exclusive(entry);
3169 young = softleaf_is_migration_young(entry);
3170 dirty = softleaf_is_migration_dirty(entry);
3171 } else if (pmd_is_device_private_entry(*pmd)) {
3172 softleaf_t entry;
3173
3174 old_pmd = *pmd;
3175 entry = softleaf_from_pmd(old_pmd);
3176 page = softleaf_to_page(entry);
3177 folio = page_folio(page);
3178
3179 soft_dirty = pmd_swp_soft_dirty(old_pmd);
3180 uffd_wp = pmd_swp_uffd_wp(old_pmd);
3181
3182 write = softleaf_is_device_private_write(entry);
3183 anon_exclusive = PageAnonExclusive(page);
3184
3185 /*
3186 * Device private THP should be treated the same as regular
3187 * folios w.r.t anon exclusive handling. See the comments for
3188 * folio handling and anon_exclusive below.
3189 */
3190 if (freeze && anon_exclusive &&
3191 folio_try_share_anon_rmap_pmd(folio, page))
3192 freeze = false;
3193 if (!freeze) {
3194 rmap_t rmap_flags = RMAP_NONE;
3195
3196 folio_ref_add(folio, HPAGE_PMD_NR - 1);
3197 if (anon_exclusive)
3198 rmap_flags |= RMAP_EXCLUSIVE;
3199
3200 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
3201 vma, haddr, rmap_flags);
3202 }
3203 } else {
3204 /*
3205 * Up to this point the pmd is present and huge and userland has
3206 * full access to the hugepage during the split (which happens in
3207 * place). If we overwrite the pmd with the not-huge version
3208 * pointing to the pte here (which of course we could if all CPUs
3209 * were bug free), userland could trigger a small page size TLB
3210 * miss on the small sized TLB while the hugepage TLB entry is
3211 * still established in the huge TLB. Some CPUs don't like that.
3212 * See
3213 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
3214 * 383 on page 105. Intel should be safe but also warns that it's
3215 * only safe if the permission and cache attributes of the two
3216 * entries loaded into the two TLBs are identical (which should
3217 * be the case here). But it is generally safer to never allow
3218 * small and huge TLB entries for the same virtual address to be
3219 * loaded simultaneously. So instead of doing "pmd_populate();
3220 * flush_pmd_tlb_range();" we first mark the current pmd
3221 * notpresent (atomically because here the pmd_trans_huge must
3222 * remain set at all times on the pmd until the split is
3223 * complete for this pmd), then we flush the SMP TLB and finally
3224 * we write the non-huge version of the pmd entry with
3225 * pmd_populate.
3226 */
3227 old_pmd = pmdp_invalidate(vma, haddr, pmd);
3228 page = pmd_page(old_pmd);
3229 folio = page_folio(page);
3230 if (pmd_dirty(old_pmd)) {
3231 dirty = true;
3232 folio_set_dirty(folio);
3233 }
3234 write = pmd_write(old_pmd);
3235 young = pmd_young(old_pmd);
3236 soft_dirty = pmd_soft_dirty(old_pmd);
3237 uffd_wp = pmd_uffd_wp(old_pmd);
3238
3239 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
3240 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3241
3242 /*
3243 * Without "freeze", we'll simply split the PMD, propagating the
3244 * PageAnonExclusive() flag for each PTE by setting it for
3245 * each subpage -- no need to (temporarily) clear.
3246 *
3247 * With "freeze" we want to replace mapped pages by
3248 * migration entries right away. This is only possible if we
3249 * managed to clear PageAnonExclusive() -- see
3250 * set_pmd_migration_entry().
3251 *
3252 * In case we cannot clear PageAnonExclusive(), split the PMD
3253 * only and let try_to_migrate_one() fail later.
3254 *
3255 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
3256 */
3257 anon_exclusive = PageAnonExclusive(page);
3258 if (freeze && anon_exclusive &&
3259 folio_try_share_anon_rmap_pmd(folio, page))
3260 freeze = false;
3261 if (!freeze) {
3262 rmap_t rmap_flags = RMAP_NONE;
3263
3264 folio_ref_add(folio, HPAGE_PMD_NR - 1);
3265 if (anon_exclusive)
3266 rmap_flags |= RMAP_EXCLUSIVE;
3267 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
3268 vma, haddr, rmap_flags);
3269 }
3270 }
3271
3272 /*
3273 * Withdraw the table only after we mark the pmd entry invalid.
3274 * This is critical for some architectures (Power).
3275 */
3276 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
3277 pmd_populate(mm, &_pmd, pgtable);
3278
3279 pte = pte_offset_map(&_pmd, haddr);
3280 VM_BUG_ON(!pte);
3281
3282 /*
3283 * Note that NUMA hinting access restrictions are not transferred to
3284 * avoid any possibility of altering permissions across VMAs.
3285 */
3286 if (freeze || pmd_is_migration_entry(old_pmd)) {
3287 pte_t entry;
3288 swp_entry_t swp_entry;
3289
3290 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3291 if (write)
3292 swp_entry = make_writable_migration_entry(
3293 page_to_pfn(page + i));
3294 else if (anon_exclusive)
3295 swp_entry = make_readable_exclusive_migration_entry(
3296 page_to_pfn(page + i));
3297 else
3298 swp_entry = make_readable_migration_entry(
3299 page_to_pfn(page + i));
3300 if (young)
3301 swp_entry = make_migration_entry_young(swp_entry);
3302 if (dirty)
3303 swp_entry = make_migration_entry_dirty(swp_entry);
3304 entry = swp_entry_to_pte(swp_entry);
3305 if (soft_dirty)
3306 entry = pte_swp_mksoft_dirty(entry);
3307 if (uffd_wp)
3308 entry = pte_swp_mkuffd_wp(entry);
3309 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3310 set_pte_at(mm, addr, pte + i, entry);
3311 }
3312 } else if (pmd_is_device_private_entry(old_pmd)) {
3313 pte_t entry;
3314 swp_entry_t swp_entry;
3315
3316 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3317 /*
3318 * anon_exclusive was already propagated to the relevant
3319 * pages corresponding to the pte entries when freeze
3320 * is false.
3321 */
3322 if (write)
3323 swp_entry = make_writable_device_private_entry(
3324 page_to_pfn(page + i));
3325 else
3326 swp_entry = make_readable_device_private_entry(
3327 page_to_pfn(page + i));
3328 /*
3329 * Young and dirty bits are not propagated via the swp_entry.
3330 */
3331 entry = swp_entry_to_pte(swp_entry);
3332 if (soft_dirty)
3333 entry = pte_swp_mksoft_dirty(entry);
3334 if (uffd_wp)
3335 entry = pte_swp_mkuffd_wp(entry);
3336 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3337 set_pte_at(mm, addr, pte + i, entry);
3338 }
3339 } else {
3340 pte_t entry;
3341
3342 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
3343 if (write)
3344 entry = pte_mkwrite(entry, vma);
3345 if (!young)
3346 entry = pte_mkold(entry);
3347 /* NOTE: this may set soft-dirty too on some archs */
3348 if (dirty)
3349 entry = pte_mkdirty(entry);
3350 if (soft_dirty)
3351 entry = pte_mksoft_dirty(entry);
3352 if (uffd_wp)
3353 entry = pte_mkuffd_wp(entry);
3354
3355 for (i = 0; i < HPAGE_PMD_NR; i++)
3356 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3357
3358 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
3359 }
3360 pte_unmap(pte);
3361
3362 if (!pmd_is_migration_entry(*pmd))
3363 folio_remove_rmap_pmd(folio, page, vma);
3364 if (freeze)
3365 put_page(page);
3366
3367 smp_wmb(); /* make pte visible before pmd */
3368 pmd_populate(mm, pmd, pgtable);
3369 }
3370
3371 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
3372 pmd_t *pmd, bool freeze)
3373 {
3374 VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
3375 if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd))
3376 __split_huge_pmd_locked(vma, pmd, address, freeze);
3377 }
3378
3379 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
3380 unsigned long address, bool freeze)
3381 {
3382 spinlock_t *ptl;
3383 struct mmu_notifier_range range;
3384
3385 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
3386 address & HPAGE_PMD_MASK,
3387 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
3388 mmu_notifier_invalidate_range_start(&range);
3389 ptl = pmd_lock(vma->vm_mm, pmd);
3390 split_huge_pmd_locked(vma, range.start, pmd, freeze);
3391 spin_unlock(ptl);
3392 mmu_notifier_invalidate_range_end(&range);
3393 }
3394
3395 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
3396 bool freeze)
3397 {
3398 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
3399
3400 if (!pmd)
3401 return;
3402
3403 __split_huge_pmd(vma, pmd, address, freeze);
3404 }
3405
3406 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
3407 {
3408 /*
3409 * If the new address isn't hpage aligned and it could previously
3410 * have contained a hugepage: check if we need to split a huge pmd.
3411 */
3412 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
3413 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
3414 ALIGN(address, HPAGE_PMD_SIZE)))
3415 split_huge_pmd_address(vma, address, false);
3416 }
3417
3418 void vma_adjust_trans_huge(struct vm_area_struct *vma,
3419 unsigned long start,
3420 unsigned long end,
3421 struct vm_area_struct *next)
3422 {
3423 /* Check if we need to split start first. */
3424 split_huge_pmd_if_needed(vma, start);
3425
3426 /* Check if we need to split end next. */
3427 split_huge_pmd_if_needed(vma, end);
3428
3429 /* If we're incrementing next->vm_start, we might need to split it. */
3430 if (next)
3431 split_huge_pmd_if_needed(next, end);
3432 }
3433
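/*
 * Unmap all mappings of a large folio before splitting it. Anonymous
 * folios are unmapped via migration entries so that remap_page() can
 * map them back afterwards.
 */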
3434 static void unmap_folio(struct folio *folio)
3435 {
3436 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
3437 TTU_BATCH_FLUSH;
3438
3439 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3440
3441 if (folio_test_pmd_mappable(folio))
3442 ttu_flags |= TTU_SPLIT_HUGE_PMD;
3443
3444 /*
3445 * Anon pages need migration entries to preserve them, but file
3446 * pages can simply be left unmapped, then faulted back on demand.
3447 * If that is ever changed (perhaps for mlock), update remap_page().
3448 */
3449 if (folio_test_anon(folio))
3450 try_to_migrate(folio, ttu_flags);
3451 else
3452 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
3453
3454 try_to_unmap_flush();
3455 }
3456
3457 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
3458 unsigned long addr, pmd_t *pmdp,
3459 struct folio *folio)
3460 {
3461 struct mm_struct *mm = vma->vm_mm;
3462 int ref_count, map_count;
3463 pmd_t orig_pmd = *pmdp;
3464
3465 if (pmd_dirty(orig_pmd))
3466 folio_set_dirty(folio);
3467 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
3468 folio_set_swapbacked(folio);
3469 return false;
3470 }
3471
3472 orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
3473
3474 /*
3475 * Syncing against concurrent GUP-fast:
3476 * - clear PMD; barrier; read refcount
3477 * - inc refcount; barrier; read PMD
3478 */
3479 smp_mb();
3480
3481 ref_count = folio_ref_count(folio);
3482 map_count = folio_mapcount(folio);
3483
3484 /*
3485 * Order reads for folio refcount and dirty flag
3486 * (see comments in __remove_mapping()).
3487 */
3488 smp_rmb();
3489
3490 /*
3491 * If the folio or its PMD is redirtied at this point, or if there
3492 * are unexpected references, we give up on discarding this folio
3493 * and remap it.
3494 *
3495 * The only folio refs must be one from isolation plus the rmap(s).
3496 */
3497 if (pmd_dirty(orig_pmd))
3498 folio_set_dirty(folio);
3499 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
3500 folio_set_swapbacked(folio);
3501 set_pmd_at(mm, addr, pmdp, orig_pmd);
3502 return false;
3503 }
3504
3505 if (ref_count != map_count + 1) {
3506 set_pmd_at(mm, addr, pmdp, orig_pmd);
3507 return false;
3508 }
3509
3510 folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
3511 zap_deposited_table(mm, pmdp);
3512 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
3513 if (vma->vm_flags & VM_LOCKED)
3514 mlock_drain_local();
3515 folio_put(folio);
3516
3517 return true;
3518 }
3519
3520 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
3521 pmd_t *pmdp, struct folio *folio)
3522 {
3523 VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
3524 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3525 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3526 VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
3527 VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
3528
3529 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
3530 }
3531
3532 static void remap_page(struct folio *folio, unsigned long nr, int flags)
3533 {
3534 int i = 0;
3535
3536 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
3537 if (!folio_test_anon(folio))
3538 return;
3539 for (;;) {
3540 remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);
3541 i += folio_nr_pages(folio);
3542 if (i >= nr)
3543 break;
3544 folio = folio_next(folio);
3545 }
3546 }
3547
3548 static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
3549 struct lruvec *lruvec, struct list_head *list)
3550 {
3551 VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
3552 lockdep_assert_held(&lruvec->lru_lock);
3553
3554 if (folio_is_device_private(folio))
3555 return;
3556
3557 if (list) {
3558 /* page reclaim is reclaiming a huge page */
3559 VM_WARN_ON(folio_test_lru(folio));
3560 folio_get(new_folio);
3561 list_add_tail(&new_folio->lru, list);
3562 } else {
3563 /* head is still on lru (and we have it frozen) */
3564 VM_WARN_ON(!folio_test_lru(folio));
3565 if (folio_test_unevictable(folio))
3566 new_folio->mlock_count = 0;
3567 else
3568 list_add_tail(&new_folio->lru, &folio->lru);
3569 folio_set_lru(new_folio);
3570 }
3571 }
3572
3573 static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
3574 {
3575 for (; nr_pages; page++, nr_pages--)
3576 if (PageHWPoison(page))
3577 return true;
3578 return false;
3579 }
3580
3581 /*
3582 * It splits @folio into @new_order folios and copies the @folio metadata to
3583 * all the resulting folios.
3584 */
3585 static void __split_folio_to_order(struct folio *folio, int old_order,
3586 int new_order)
3587 {
3588 /* Scan for poisoned pages when splitting a hwpoisoned folio into large folios */
3589 const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order;
3590 long new_nr_pages = 1 << new_order;
3591 long nr_pages = 1 << old_order;
3592 long i;
3593
3594 folio_clear_has_hwpoisoned(folio);
3595
3596 /* Check first new_nr_pages since the loop below skips them */
3597 if (handle_hwpoison &&
3598 page_range_has_hwpoisoned(folio_page(folio, 0), new_nr_pages))
3599 folio_set_has_hwpoisoned(folio);
3600 /*
3601 * Skip the first new_nr_pages pages, since the new folio formed from
3602 * them keeps all the flags of the original folio.
3603 */
3604 for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) {
3605 struct page *new_head = &folio->page + i;
3606 /*
3607 * Careful: new_folio is not a "real" folio before we cleared PageTail.
3608 * Don't pass it around before clear_compound_head().
3609 */
3610 struct folio *new_folio = (struct folio *)new_head;
3611
3612 VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head);
3613
3614 /*
3615 * Clone page flags before unfreezing refcount.
3616 *
3617 * A successful get_page_unless_zero() might be followed by flag
3618 * changes, for example lock_page() setting PG_waiters.
3619 *
3620 * Note that for mapped sub-pages of an anonymous THP,
3621 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
3622 * the migration entry instead from where remap_page() will restore it.
3623 * We can still have PG_anon_exclusive set on effectively unmapped and
3624 * unreferenced sub-pages of an anonymous THP: we can simply drop
3625 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
3626 */
3627 new_folio->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
3628 new_folio->flags.f |= (folio->flags.f &
3629 ((1L << PG_referenced) |
3630 (1L << PG_swapbacked) |
3631 (1L << PG_swapcache) |
3632 (1L << PG_mlocked) |
3633 (1L << PG_uptodate) |
3634 (1L << PG_active) |
3635 (1L << PG_workingset) |
3636 (1L << PG_locked) |
3637 (1L << PG_unevictable) |
3638 #ifdef CONFIG_ARCH_USES_PG_ARCH_2
3639 (1L << PG_arch_2) |
3640 #endif
3641 #ifdef CONFIG_ARCH_USES_PG_ARCH_3
3642 (1L << PG_arch_3) |
3643 #endif
3644 (1L << PG_dirty) |
3645 LRU_GEN_MASK | LRU_REFS_MASK));
3646
3647 if (handle_hwpoison &&
3648 page_range_has_hwpoisoned(new_head, new_nr_pages))
3649 folio_set_has_hwpoisoned(new_folio);
3650
3651 new_folio->mapping = folio->mapping;
3652 new_folio->index = folio->index + i;
3653
3654 if (folio_test_swapcache(folio))
3655 new_folio->swap.val = folio->swap.val + i;
3656
3657 /* Page flags must be visible before we make the page non-compound. */
3658 smp_wmb();
3659
3660 /*
3661 * Clear PageTail before unfreezing page refcount.
3662 *
3663 * A successful get_page_unless_zero() might be followed by put_page(),
3664 * which needs a correct compound_head().
3665 */
3666 clear_compound_head(new_head);
3667 if (new_order) {
3668 prep_compound_page(new_head, new_order);
3669 folio_set_large_rmappable(new_folio);
3670 }
3671
3672 if (folio_test_young(folio))
3673 folio_set_young(new_folio);
3674 if (folio_test_idle(folio))
3675 folio_set_idle(new_folio);
3676 #ifdef CONFIG_MEMCG
3677 new_folio->memcg_data = folio->memcg_data;
3678 #endif
3679
3680 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
3681 }
3682
3683 if (new_order)
3684 folio_set_order(folio, new_order);
3685 else
3686 ClearPageCompound(&folio->page);
3687 }
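
/*
 * Worked example (illustrative only): splitting an order-9 folio (512
 * pages) to new_order = 3 walks the tail pages in steps of
 * new_nr_pages = 8 and produces 64 order-3 folios.  The new folio
 * starting at subpage i keeps ->mapping, gets ->index = folio->index + i
 * and, for swapcache folios, ->swap.val = folio->swap.val + i; the first
 * 8 pages remain in the original folio, which is reduced to order 3 and
 * keeps its flags.
 */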
3688
3689 /**
3690 * __split_unmapped_folio() - splits an unmapped @folio to lower order folios in
3691 * two ways: uniform split or non-uniform split.
3692 * @folio: the to-be-split folio
3693 * @new_order: the smallest order of the after split folios (since buddy
3694 * allocator like split generates folios with orders from @folio's
3695 * order - 1 to new_order).
3696 * @split_at: in buddy allocator like split, the folio containing @split_at
3697 * will be split until its order becomes @new_order.
3698 * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller
3699 * @mapping: @folio->mapping
3700 * @split_type: if the split is uniform or not (buddy allocator like split)
3701 *
3703 * 1. uniform split: the given @folio is split into multiple @new_order small folios,
3704 * where all small folios have the same order. This is done when
3705 * split_type is SPLIT_TYPE_UNIFORM.
3706 * 2. buddy allocator like (non-uniform) split: the given @folio is split into
3707 * half and one of the half (containing the given page) is split into half
3708 * until the given @folio's order becomes @new_order. This is done when
3709 * split_type is SPLIT_TYPE_NON_UNIFORM.
3710 *
3711 * The high level flow for these two methods are:
3712 *
3713 * 1. uniform split: @xas is split with no expectation of failure and a single
3714 * __split_folio_to_order() is called to split the @folio into @new_order
3715 * along with stats update.
3716 * 2. non-uniform split: folio_order - @new_order calls to
3717 * __split_folio_to_order() are expected to be made in a for loop to split
3718 * the @folio to one lower order at a time. The folio containing @split_at
3719 * is split in each iteration. @xas is split into half in each iteration and
3720 * can fail. A failed @xas split leaves split folios as is without merging
3721 * them back.
3722 *
3723 * After splitting, the caller's folio reference will be transferred to the
3724 * folio containing @split_at. The caller needs to unlock and/or free
3725 * after-split folios if necessary.
3726 *
3727 * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
3728 * split but not to @new_order, the caller needs to check)
3729 */
3730 static int __split_unmapped_folio(struct folio *folio, int new_order,
3731 struct page *split_at, struct xa_state *xas,
3732 struct address_space *mapping, enum split_type split_type)
3733 {
3734 const bool is_anon = folio_test_anon(folio);
3735 int old_order = folio_order(folio);
3736 int start_order = split_type == SPLIT_TYPE_UNIFORM ? new_order : old_order - 1;
3737 struct folio *old_folio = folio;
3738 int split_order;
3739
3740 /*
3741 * split to new_order one order at a time. For uniform split,
3742 * folio is split to new_order directly.
3743 */
3744 for (split_order = start_order;
3745 split_order >= new_order;
3746 split_order--) {
3747 int nr_new_folios = 1UL << (old_order - split_order);
3748
3749 /* order-1 anonymous folio is not supported */
3750 if (is_anon && split_order == 1)
3751 continue;
3752
3753 if (mapping) {
3754 /*
3755 * uniform split has xas_split_alloc() called before
3756 * irq is disabled to allocate enough memory, whereas
3757 * non-uniform split can handle ENOMEM.
3758 * Use the to-be-split folio, so that a parallel
3759 * folio_try_get() waits on it until xarray is updated
3760 * with after-split folios and the original one is
3761 * unfrozen.
3762 */
3763 if (split_type == SPLIT_TYPE_UNIFORM) {
3764 xas_split(xas, old_folio, old_order);
3765 } else {
3766 xas_set_order(xas, folio->index, split_order);
3767 xas_try_split(xas, old_folio, old_order);
3768 if (xas_error(xas))
3769 return xas_error(xas);
3770 }
3771 }
3772
3773 folio_split_memcg_refs(folio, old_order, split_order);
3774 split_page_owner(&folio->page, old_order, split_order);
3775 pgalloc_tag_split(folio, old_order, split_order);
3776 __split_folio_to_order(folio, old_order, split_order);
3777
3778 if (is_anon) {
3779 mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
3780 mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, nr_new_folios);
3781 }
3782 /*
3783 * If uniform split, the process is complete.
3784 * If non-uniform, continue splitting the folio at @split_at
3785 * as long as the next @split_order is >= @new_order.
3786 */
3787 folio = page_folio(split_at);
3788 old_order = split_order;
3789 }
3790
3791 return 0;
3792 }
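
/*
 * Illustrative trace (not code): with old_order = 9 and new_order = 3,
 * SPLIT_TYPE_UNIFORM does a single pass with split_order = 3 and yields
 * 64 order-3 folios.  SPLIT_TYPE_NON_UNIFORM iterates split_order =
 * 8, 7, 6, 5, 4, 3, each pass halving only the folio that contains
 * @split_at, so the result is one folio of each order 8..4 plus two
 * order-3 folios, one of which contains @split_at.
 */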
3793
3794 /**
3795 * folio_check_splittable() - check if a folio can be split to a given order
3796 * @folio: folio to be split
3797 * @new_order: the smallest order of the after split folios (since buddy
3798 * allocator like split generates folios with orders from @folio's
3799 * order - 1 to new_order).
3800 * @split_type: uniform or non-uniform split
3801 *
3802 * folio_check_splittable() checks if @folio can be split to @new_order using
3803 * @split_type method. The truncated folio check must come first.
3804 *
3805 * Context: folio must be locked.
3806 *
3807 * Return: 0 - @folio can be split to @new_order, otherwise an error number is
3808 * returned.
3809 */
3810 int folio_check_splittable(struct folio *folio, unsigned int new_order,
3811 enum split_type split_type)
3812 {
3813 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3814 /*
3815 * Folios that just got truncated cannot get split. Signal to the
3816 * caller that there was a race.
3817 *
3818 * TODO: this will also currently refuse folios without a mapping in the
3819 * swapcache (shmem or to-be-anon folios).
3820 */
3821 if (!folio->mapping && !folio_test_anon(folio))
3822 return -EBUSY;
3823
3824 if (folio_test_anon(folio)) {
3825 /* order-1 is not supported for anonymous THP. */
3826 if (new_order == 1)
3827 return -EINVAL;
3828 } else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
3829 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3830 !mapping_large_folio_support(folio->mapping)) {
3831 /*
3832 * We can always split a folio down to a single page
3833 * (new_order == 0) uniformly.
3834 *
3835 * For any other scenario
3836 * a) uniform split targeting a large folio
3837 * (new_order > 0)
3838 * b) any non-uniform split
3839 * we must confirm that the file system supports large
3840 * folios.
3841 *
3842 * Note that we might still have THPs in such
3843 * mappings, which is created from khugepaged when
3844 * CONFIG_READ_ONLY_THP_FOR_FS is enabled. But in that
3845 * case, the mapping does not actually support large
3846 * folios properly.
3847 */
3848 return -EINVAL;
3849 }
3850 }
3851
3852 /*
3853 * A swapcache folio can only be split to order 0.
3854 *
3855 * non-uniform split creates after-split folios with orders from
3856 * folio_order(folio) - 1 to new_order, making it not suitable for any
3857 * swapcache folio split. Only uniform split to order-0 can be used
3858 * here.
3859 */
3860 if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
3861 return -EINVAL;
3862 }
3863
3864 if (is_huge_zero_folio(folio))
3865 return -EINVAL;
3866
3867 if (folio_test_writeback(folio))
3868 return -EBUSY;
3869
3870 return 0;
3871 }
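
/*
 * Examples of the checks above (a recap, not additional rules): an
 * anonymous folio with new_order == 1, any swapcache folio asked for a
 * non-uniform split or new_order > 0, and the huge zero folio all get
 * -EINVAL, as do large-order or non-uniform splits of file folios whose
 * mapping lacks large folio support (with CONFIG_READ_ONLY_THP_FOR_FS);
 * a folio under writeback or one that raced with truncation gets -EBUSY.
 */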
3872
3873 /* Number of folio references from the pagecache or the swapcache. */
3874 static unsigned int folio_cache_ref_count(const struct folio *folio)
3875 {
3876 if (folio_test_anon(folio) && !folio_test_swapcache(folio))
3877 return 0;
3878 return folio_nr_pages(folio);
3879 }
3880
3881 static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
3882 struct page *split_at, struct xa_state *xas,
3883 struct address_space *mapping, bool do_lru,
3884 struct list_head *list, enum split_type split_type,
3885 pgoff_t end, int *nr_shmem_dropped)
3886 {
3887 struct folio *end_folio = folio_next(folio);
3888 struct folio *new_folio, *next;
3889 int old_order = folio_order(folio);
3890 int ret = 0;
3891 struct deferred_split *ds_queue;
3892
3893 VM_WARN_ON_ONCE(!mapping && end);
3894 /* Prevent deferred_split_scan() touching ->_refcount */
3895 ds_queue = folio_split_queue_lock(folio);
3896 if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
3897 struct swap_cluster_info *ci = NULL;
3898 struct lruvec *lruvec;
3899
3900 if (old_order > 1) {
3901 if (!list_empty(&folio->_deferred_list)) {
3902 ds_queue->split_queue_len--;
3903 /*
3904 * Reinitialize page_deferred_list after removing the
3905 * page from the split_queue, otherwise a subsequent
3906 * split will see list corruption when checking the
3907 * page_deferred_list.
3908 */
3909 list_del_init(&folio->_deferred_list);
3910 }
3911 if (folio_test_partially_mapped(folio)) {
3912 folio_clear_partially_mapped(folio);
3913 mod_mthp_stat(old_order,
3914 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3915 }
3916 }
3917 split_queue_unlock(ds_queue);
3918 if (mapping) {
3919 int nr = folio_nr_pages(folio);
3920
3921 if (folio_test_pmd_mappable(folio) &&
3922 new_order < HPAGE_PMD_ORDER) {
3923 if (folio_test_swapbacked(folio)) {
3924 lruvec_stat_mod_folio(folio,
3925 NR_SHMEM_THPS, -nr);
3926 } else {
3927 lruvec_stat_mod_folio(folio,
3928 NR_FILE_THPS, -nr);
3929 filemap_nr_thps_dec(mapping);
3930 }
3931 }
3932 }
3933
3934 if (folio_test_swapcache(folio)) {
3935 if (mapping) {
3936 VM_WARN_ON_ONCE_FOLIO(mapping, folio);
3937 return -EINVAL;
3938 }
3939
3940 ci = swap_cluster_get_and_lock(folio);
3941 }
3942
3943 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
3944 if (do_lru)
3945 lruvec = folio_lruvec_lock(folio);
3946
3947 ret = __split_unmapped_folio(folio, new_order, split_at, xas,
3948 mapping, split_type);
3949
3950 /*
3951 * Unfreeze after-split folios and put them back to the right
3952 * list. @folio should be kept frozen until page cache
3953 * entries are updated with all the other after-split folios
3954 * to prevent others seeing stale page cache entries.
3955 * As a result, new_folio starts from the next folio of
3956 * @folio.
3957 */
3958 for (new_folio = folio_next(folio); new_folio != end_folio;
3959 new_folio = next) {
3960 unsigned long nr_pages = folio_nr_pages(new_folio);
3961
3962 next = folio_next(new_folio);
3963
3964 zone_device_private_split_cb(folio, new_folio);
3965
3966 folio_ref_unfreeze(new_folio,
3967 folio_cache_ref_count(new_folio) + 1);
3968
3969 if (do_lru)
3970 lru_add_split_folio(folio, new_folio, lruvec, list);
3971
3972 /*
3973 * Anonymous folio with swap cache.
3974 * NOTE: shmem in swap cache is not supported yet.
3975 */
3976 if (ci) {
3977 __swap_cache_replace_folio(ci, folio, new_folio);
3978 continue;
3979 }
3980
3981 /* Anonymous folio without swap cache */
3982 if (!mapping)
3983 continue;
3984
3985 /* Add the new folio to the page cache. */
3986 if (new_folio->index < end) {
3987 __xa_store(&mapping->i_pages, new_folio->index,
3988 new_folio, 0);
3989 continue;
3990 }
3991
3992 VM_WARN_ON_ONCE(!nr_shmem_dropped);
3993 /* Drop folio beyond EOF: ->index >= end */
3994 if (shmem_mapping(mapping) && nr_shmem_dropped)
3995 *nr_shmem_dropped += nr_pages;
3996 else if (folio_test_clear_dirty(new_folio))
3997 folio_account_cleaned(
3998 new_folio, inode_to_wb(mapping->host));
3999 __filemap_remove_folio(new_folio, NULL);
4000 folio_put_refs(new_folio, nr_pages);
4001 }
4002
4003 zone_device_private_split_cb(folio, NULL);
4004 /*
4005 * Unfreeze @folio only after all page cache entries, which
4006 * used to point to it, have been updated with new folios.
4007 * Otherwise, a parallel folio_try_get() can grab @folio
4008 * and its caller can see stale page cache entries.
4009 */
4010 folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
4011
4012 if (do_lru)
4013 lruvec_unlock(lruvec);
4014
4015 if (ci)
4016 swap_cluster_unlock(ci);
4017 } else {
4018 split_queue_unlock(ds_queue);
4019 return -EAGAIN;
4020 }
4021
4022 return ret;
4023 }
4024
4025 /**
4026 * __folio_split() - split a folio at @split_at to a @new_order folio
4027 * @folio: folio to split
4028 * @new_order: the order of the new folio
4029 * @split_at: a page within the new folio
4030 * @lock_at: a page within @folio to be left locked to caller
4031 * @list: after-split folios will be put on it if non NULL
4032 * @split_type: perform uniform split or not (non-uniform split)
4033 *
4034 * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
4035 * It is in charge of checking whether the split is supported or not and
4036 * preparing @folio for __split_unmapped_folio().
4037 *
4038 * After splitting, the after-split folio containing @lock_at remains locked
4039 * and others are unlocked:
4040 * 1. for uniform split, @lock_at points to one of @folio's subpages;
4041 * 2. for buddy allocator like (non-uniform) split, @lock_at points to @folio.
4042 *
4043 * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
4044 * split but not to @new_order, the caller needs to check)
4045 */
4046 static int __folio_split(struct folio *folio, unsigned int new_order,
4047 struct page *split_at, struct page *lock_at,
4048 struct list_head *list, enum split_type split_type)
4049 {
4050 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
4051 struct folio *end_folio = folio_next(folio);
4052 bool is_anon = folio_test_anon(folio);
4053 struct address_space *mapping = NULL;
4054 struct anon_vma *anon_vma = NULL;
4055 int old_order = folio_order(folio);
4056 struct folio *new_folio, *next;
4057 int nr_shmem_dropped = 0;
4058 enum ttu_flags ttu_flags = 0;
4059 int ret;
4060 pgoff_t end = 0;
4061
4062 VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
4063 VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
4064
4065 if (folio != page_folio(split_at) || folio != page_folio(lock_at)) {
4066 ret = -EINVAL;
4067 goto out;
4068 }
4069
4070 if (new_order >= old_order) {
4071 ret = -EINVAL;
4072 goto out;
4073 }
4074
4075 ret = folio_check_splittable(folio, new_order, split_type);
4076 if (ret) {
4077 VM_WARN_ONCE(ret == -EINVAL, "Tried to split an unsplittable folio");
4078 goto out;
4079 }
4080
4081 if (is_anon) {
4082 /*
4083 * The caller does not necessarily hold an mmap_lock that would
4084 * prevent the anon_vma from disappearing, so we first take a
4085 * reference to it and then lock the anon_vma for write. This
4086 * is similar to folio_lock_anon_vma_read except the write lock
4087 * is taken to serialise against parallel split or collapse
4088 * operations.
4089 */
4090 anon_vma = folio_get_anon_vma(folio);
4091 if (!anon_vma) {
4092 ret = -EBUSY;
4093 goto out;
4094 }
4095 anon_vma_lock_write(anon_vma);
4096 mapping = NULL;
4097 } else {
4098 unsigned int min_order;
4099 gfp_t gfp;
4100
4101 mapping = folio->mapping;
4102 min_order = mapping_min_folio_order(folio->mapping);
4103 if (new_order < min_order) {
4104 ret = -EINVAL;
4105 goto out;
4106 }
4107
4108 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
4109 GFP_RECLAIM_MASK);
4110
4111 if (!filemap_release_folio(folio, gfp)) {
4112 ret = -EBUSY;
4113 goto out;
4114 }
4115
4116 if (split_type == SPLIT_TYPE_UNIFORM) {
4117 xas_set_order(&xas, folio->index, new_order);
4118 xas_split_alloc(&xas, folio, old_order, gfp);
4119 if (xas_error(&xas)) {
4120 ret = xas_error(&xas);
4121 goto out;
4122 }
4123 }
4124
4125 anon_vma = NULL;
4126 i_mmap_lock_read(mapping);
4127
4128 /*
4129 * __split_unmapped_folio() may need to trim off pages beyond
4130 * EOF: but on 32-bit, i_size_read() takes an irq-unsafe
4131 * seqlock, which cannot be nested inside the page tree lock.
4132 * So note end now: i_size itself may be changed at any moment,
4133 * but folio lock is good enough to serialize the trimming.
4134 */
4135 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
4136 if (shmem_mapping(mapping))
4137 end = shmem_fallocend(mapping->host, end);
4138 }
4139
4140 /*
4141 * Racy check if we can split the page, before unmap_folio() will
4142 * split PMDs
4143 */
4144 if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) {
4145 ret = -EAGAIN;
4146 goto out_unlock;
4147 }
4148
4149 unmap_folio(folio);
4150
4151 /* block interrupt reentry in xa_lock and spinlock */
4152 local_irq_disable();
4153 if (mapping) {
4154 /*
4155 * Check if the folio is present in page cache.
4156 * We assume all tail are present too, if folio is there.
4157 */
4158 xas_lock(&xas);
4159 xas_reset(&xas);
4160 if (xas_load(&xas) != folio) {
4161 ret = -EAGAIN;
4162 goto fail;
4163 }
4164 }
4165
4166 ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
4167 true, list, split_type, end, &nr_shmem_dropped);
4168 fail:
4169 if (mapping)
4170 xas_unlock(&xas);
4171
4172 local_irq_enable();
4173
4174 if (nr_shmem_dropped)
4175 shmem_uncharge(mapping->host, nr_shmem_dropped);
4176
4177 if (!ret && is_anon && !folio_is_device_private(folio))
4178 ttu_flags = TTU_USE_SHARED_ZEROPAGE;
4179
4180 remap_page(folio, 1 << old_order, ttu_flags);
4181
4182 /*
4183 * Unlock all after-split folios except the one containing
4184 * @lock_at page. If @folio is not split, it will be kept locked.
4185 */
4186 for (new_folio = folio; new_folio != end_folio; new_folio = next) {
4187 next = folio_next(new_folio);
4188 if (new_folio == page_folio(lock_at))
4189 continue;
4190
4191 folio_unlock(new_folio);
4192 /*
4193 * Subpages may be freed if there was no mapping left, for
4194 * example if add_to_swap() is running on an LRU page that
4195 * had its mapping zapped. Freeing these pages requires
4196 * taking the lru_lock, so we do the put_page of the tail
4197 * pages after the split is complete.
4198 */
4199 free_folio_and_swap_cache(new_folio);
4200 }
4201
4202 out_unlock:
4203 if (anon_vma) {
4204 anon_vma_unlock_write(anon_vma);
4205 put_anon_vma(anon_vma);
4206 }
4207 if (mapping)
4208 i_mmap_unlock_read(mapping);
4209 out:
4210 xas_destroy(&xas);
4211 if (is_pmd_order(old_order))
4212 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
4213 count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
4214 return ret;
4215 }
4216
4217 /**
4218 * folio_split_unmapped() - split a large anon folio that is already unmapped
4219 * @folio: folio to split
4220 * @new_order: the order of folios after split
4221 *
4222 * This function is a helper for splitting folios that have already been
4223 * unmapped. The use case is that the device or the CPU can refuse to migrate
4224 * THP pages in the middle of migration, due to allocation issues on either
4225 * side.
4226 *
4227 * anon_vma_lock is not required to be held, mmap_read_lock() or
4228 * mmap_write_lock() should be held. @folio is expected to be locked by the
4229 * caller. Both device-private and non-device-private folios are supported,
4230 * along with folios that are in the swapcache. @folio should also be unmapped
4231 * and isolated from the LRU (if applicable).
4232 *
4233 * Upon return, the folio is not remapped, split folios are not added to LRU,
4234 * free_folio_and_swap_cache() is not called, and new folios remain locked.
4235 *
4236 * Return: 0 on success, -EAGAIN if the folio cannot be split (e.g., due to
4237 * insufficient reference count or extra pins).
4238 */
4239 int folio_split_unmapped(struct folio *folio, unsigned int new_order)
4240 {
4241 int ret = 0;
4242
4243 VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
4244 VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
4245 VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
4246 VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
4247
4248 if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
4249 return -EAGAIN;
4250
4251 local_irq_disable();
4252 ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
4253 NULL, false, NULL, SPLIT_TYPE_UNIFORM,
4254 0, NULL);
4255 local_irq_enable();
4256 return ret;
4257 }
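
/*
 * Usage sketch (illustrative only; error handling and migration state are
 * elided, "abort" is a hypothetical label): a migration path that cannot
 * move a THP as a whole may fall back to splitting it while it is still
 * unmapped and locked:
 *
 *	// folio is locked, unmapped and isolated by the caller
 *	if (folio_test_large(folio) && folio_split_unmapped(folio, 0))
 *		goto abort;	// could not split, e.g. unexpected references
 *	// after-split folios remain locked and are not put on any LRU
 */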
4258
4259 /*
4260 * This function splits a large folio into smaller folios of order @new_order.
4261 * @page can point to any page of the large folio to split. The split operation
4262 * does not change the position of @page.
4263 *
4264 * Prerequisites:
4265 *
4266 * 1) The caller must hold a reference on the @page's owning folio, also known
4267 * as the large folio.
4268 *
4269 * 2) The large folio must be locked.
4270 *
4271 * 3) The folio must not be pinned. Any unexpected folio references, including
4272 * GUP pins, will result in the folio not getting split; instead, the caller
4273 * will receive an -EAGAIN.
4274 *
4275 * 4) @new_order > 1, usually. Splitting to order-1 is not supported for
4276 * anonymous folios, because folio->_deferred_list, which
4277 * is used by partially mapped folios, is stored in subpage 2, but an order-1
4278 * folio only has subpages 0 and 1. File-backed order-1 folios are supported,
4279 * since they do not use _deferred_list.
4280 *
4281 * After splitting, the caller's folio reference will be transferred to @page,
4282 * resulting in a raised refcount of @page after this call. The other pages may
4283 * be freed if they are not mapped.
4284 *
4285 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
4286 *
4287 * Pages in @new_order will inherit the mapping, flags, and so on from the
4288 * huge page.
4289 *
4290 * Returns 0 if the huge page was split successfully.
4291 *
4292 * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
4293 * the folio was concurrently removed from the page cache.
4294 *
4295 * Returns -EBUSY when trying to split the huge zeropage, if the folio is
4296 * under writeback, if fs-specific folio metadata cannot currently be
4297 * released, or if some unexpected race happened (e.g., anon VMA disappeared,
4298 * truncation).
4299 *
4300 * Callers should ensure that the order respects the address space mapping
4301 * min-order if one is set for non-anonymous folios.
4302 *
4303 * Returns -EINVAL when trying to split to an order that is incompatible
4304 * with the folio. Splitting to order 0 is compatible with all folios.
4305 */
4306 int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
4307 unsigned int new_order)
4308 {
4309 struct folio *folio = page_folio(page);
4310
4311 return __folio_split(folio, new_order, &folio->page, page, list,
4312 SPLIT_TYPE_UNIFORM);
4313 }
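
/*
 * Usage sketch (illustrative only), following the pattern of the debugfs
 * and shrinker callers later in this file: the caller holds a folio
 * reference and the folio lock around the split, then unlocks and drops
 * its reference.  Error handling is abbreviated; "err" is hypothetical.
 *
 *	folio_get(folio);
 *	if (folio_trylock(folio)) {
 *		// 0 on success; with list == NULL tail folios go to the LRU
 *		err = split_huge_page_to_list_to_order(&folio->page, NULL, 0);
 *		folio_unlock(folio);
 *	}
 *	folio_put(folio);
 */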
4314
4315 /**
4316 * folio_split() - split a folio at @split_at to a @new_order folio
4317 * @folio: folio to split
4318 * @new_order: the order of the new folio
4319 * @split_at: a page within the new folio
4320 * @list: after-split folios are added to @list if not null, otherwise to LRU
4321 * list
4322 *
4323 * It has the same prerequisites and returns as
4324 * split_huge_page_to_list_to_order().
4325 *
4326 * Split a folio at @split_at to a new_order folio, leave the
4327 * remaining subpages of the original folio as large as possible. For example,
4328 * in the case of splitting an order-9 folio at its third order-3 subpages to
4329 * an order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio.
4330 * After the split, there will be a group of folios with different orders and
4331 * the new folio containing @split_at is shown in braces:
4332 * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8].
4333 *
4334 * After split, folio is left locked for caller.
4335 *
4336 * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
4337 * split but not to @new_order, the caller needs to check)
4338 */
4339 int folio_split(struct folio *folio, unsigned int new_order,
4340 struct page *split_at, struct list_head *list)
4341 {
4342 return __folio_split(folio, new_order, split_at, &folio->page, list,
4343 SPLIT_TYPE_NON_UNIFORM);
4344 }
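
/*
 * Usage sketch (illustrative only): non-uniform split so that only the
 * region around an offset of interest becomes small folios, as the
 * debugfs interface below does when an in-folio offset is supplied.
 * "offset" and "err" are hypothetical local variables.
 *
 *	struct page *split_at = folio_page(folio, offset);
 *
 *	err = folio_split(folio, new_order, split_at, NULL);
 *	// on success, the after-split folio containing &folio->page stays locked
 */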
4345
4346 /**
4347 * min_order_for_split() - get the minimum order @folio can be split to
4348 * @folio: folio to split
4349 *
4350 * min_order_for_split() tells the minimum order @folio can be split to.
4351 * If a file-backed folio is truncated, 0 will be returned. Any subsequent
4352 * split attempt should get -EBUSY from split checking code.
4353 *
4354 * Return: @folio's minimum order for split
4355 */
4356 unsigned int min_order_for_split(struct folio *folio)
4357 {
4358 if (folio_test_anon(folio))
4359 return 0;
4360
4361 /*
4362 * If the folio got truncated, we don't know the previous mapping and
4363 * consequently the old min order. But it doesn't matter, as any split
4364 * attempt will immediately fail with -EBUSY because the folio cannot get
4365 * split until it is freed.
4366 */
4367 if (!folio->mapping)
4368 return 0;
4369
4370 return mapping_min_folio_order(folio->mapping);
4371 }
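
/*
 * Usage sketch (illustrative only): respect the mapping's minimum folio
 * order when splitting, as the debugfs helpers below do with
 * max(new_order, mapping_min_folio_order()).  "err" is hypothetical.
 *
 *	unsigned int order = min_order_for_split(folio);
 *
 *	// order is 0 for anonymous or already-truncated folios
 *	err = split_folio_to_order(folio, order);
 */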
4372
4373 int split_folio_to_list(struct folio *folio, struct list_head *list)
4374 {
4375 return split_huge_page_to_list_to_order(&folio->page, list, 0);
4376 }
4377
4378 /*
4379 * __folio_unqueue_deferred_split() is not to be called directly:
4380 * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h
4381 * limits its calls to those folios which may have a _deferred_list for
4382 * queueing THP splits, and that list is (racily observed to be) non-empty.
4383 *
4384 * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is
4385 * zero: because even when split_queue_lock is held, a non-empty _deferred_list
4386 * might be in use on deferred_split_scan()'s unlocked on-stack list.
4387 *
4388 * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
4389 * therefore important to unqueue deferred split before changing folio memcg.
4390 */
4391 bool __folio_unqueue_deferred_split(struct folio *folio)
4392 {
4393 struct deferred_split *ds_queue;
4394 unsigned long flags;
4395 bool unqueued = false;
4396
4397 WARN_ON_ONCE(folio_ref_count(folio));
4398 WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
4399
4400 ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
4401 if (!list_empty(&folio->_deferred_list)) {
4402 ds_queue->split_queue_len--;
4403 if (folio_test_partially_mapped(folio)) {
4404 folio_clear_partially_mapped(folio);
4405 mod_mthp_stat(folio_order(folio),
4406 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
4407 }
4408 list_del_init(&folio->_deferred_list);
4409 unqueued = true;
4410 }
4411 split_queue_unlock_irqrestore(ds_queue, flags);
4412
4413 return unqueued; /* useful for debug warnings */
4414 }
4415
4416 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
4417 void deferred_split_folio(struct folio *folio, bool partially_mapped)
4418 {
4419 struct deferred_split *ds_queue;
4420 unsigned long flags;
4421
4422 /*
4423 * Order 1 folios have no space for a deferred list, but we also
4424 * won't waste much memory by not adding them to the deferred list.
4425 */
4426 if (folio_order(folio) <= 1)
4427 return;
4428
4429 if (!partially_mapped && !split_underused_thp)
4430 return;
4431
4432 /*
4433 * Exclude swapcache: originally to avoid a corrupt deferred split
4434 * queue. Nowadays that is fully prevented by memcg1_swapout();
4435 * but if page reclaim is already handling the same folio, it is
4436 * unnecessary to handle it again in the shrinker, so excluding
4437 * swapcache here may still be a useful optimization.
4438 */
4439 if (folio_test_swapcache(folio))
4440 return;
4441
4442 ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
4443 if (partially_mapped) {
4444 if (!folio_test_partially_mapped(folio)) {
4445 folio_set_partially_mapped(folio);
4446 if (folio_test_pmd_mappable(folio))
4447 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
4448 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
4449 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
4450
4451 }
4452 } else {
4453 /* partially mapped folios cannot become non-partially mapped */
4454 VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
4455 }
4456 if (list_empty(&folio->_deferred_list)) {
4457 struct mem_cgroup *memcg;
4458
4459 memcg = folio_split_queue_memcg(folio, ds_queue);
4460 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
4461 ds_queue->split_queue_len++;
4462 if (memcg)
4463 set_shrinker_bit(memcg, folio_nid(folio),
4464 shrinker_id(deferred_split_shrinker));
4465 }
4466 split_queue_unlock_irqrestore(ds_queue, flags);
4467 }
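
/*
 * Illustrative note on callers: partially_mapped == true is used when part
 * of a large folio has been unmapped (typically from the rmap removal
 * path), marking it for the deferred split shrinker below;
 * partially_mapped == false only queues the folio so that the
 * underused-THP shrinker can later examine it (see split_underused_thp
 * and thp_underused()).
 */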
4468
4469 static unsigned long deferred_split_count(struct shrinker *shrink,
4470 struct shrink_control *sc)
4471 {
4472 struct pglist_data *pgdata = NODE_DATA(sc->nid);
4473 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
4474
4475 #ifdef CONFIG_MEMCG
4476 if (sc->memcg)
4477 ds_queue = &sc->memcg->deferred_split_queue;
4478 #endif
4479 return READ_ONCE(ds_queue->split_queue_len);
4480 }
4481
4482 static bool thp_underused(struct folio *folio)
4483 {
4484 int num_zero_pages = 0, num_filled_pages = 0;
4485 int i;
4486
4487 if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
4488 return false;
4489
4490 if (folio_contain_hwpoisoned_page(folio))
4491 return false;
4492
4493 for (i = 0; i < folio_nr_pages(folio); i++) {
4494 if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
4495 if (++num_zero_pages > khugepaged_max_ptes_none)
4496 return true;
4497 } else {
4498 /*
4499 * Another path for early exit once the number
4500 * of non-zero filled pages exceeds threshold.
4501 */
4502 if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
4503 return false;
4504 }
4505 }
4506 return false;
4507 }
4508
4509 static unsigned long deferred_split_scan(struct shrinker *shrink,
4510 struct shrink_control *sc)
4511 {
4512 struct deferred_split *ds_queue;
4513 unsigned long flags;
4514 struct folio *folio, *next;
4515 int split = 0, i;
4516 struct folio_batch fbatch;
4517
4518 folio_batch_init(&fbatch);
4519
4520 retry:
4521 ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
4522 /* Take pin on all head pages to avoid freeing them under us */
4523 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
4524 _deferred_list) {
4525 if (folio_try_get(folio)) {
4526 folio_batch_add(&fbatch, folio);
4527 } else if (folio_test_partially_mapped(folio)) {
4528 /* We lost race with folio_put() */
4529 folio_clear_partially_mapped(folio);
4530 mod_mthp_stat(folio_order(folio),
4531 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
4532 }
4533 list_del_init(&folio->_deferred_list);
4534 ds_queue->split_queue_len--;
4535 if (!--sc->nr_to_scan)
4536 break;
4537 if (!folio_batch_space(&fbatch))
4538 break;
4539 }
4540 split_queue_unlock_irqrestore(ds_queue, flags);
4541
4542 for (i = 0; i < folio_batch_count(&fbatch); i++) {
4543 bool did_split = false;
4544 bool underused = false;
4545 struct deferred_split *fqueue;
4546
4547 folio = fbatch.folios[i];
4548 if (!folio_test_partially_mapped(folio)) {
4549 /*
4550 * See try_to_map_unused_to_zeropage(): we cannot
4551 * optimize zero-filled pages after splitting an
4552 * mlocked folio.
4553 */
4554 if (folio_test_mlocked(folio))
4555 goto next;
4556 underused = thp_underused(folio);
4557 if (!underused)
4558 goto next;
4559 }
4560 if (!folio_trylock(folio))
4561 goto requeue;
4562 if (!split_folio(folio)) {
4563 did_split = true;
4564 if (underused)
4565 count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
4566 split++;
4567 }
4568 folio_unlock(folio);
4569 next:
4570 /*
4571 * If thp_underused() returns false, or if split_folio()
4572 * succeeds, or if split_folio() fails in the case it was
4573 * underused, then consider it used and don't add it back to
4574 * split_queue.
4575 */
4576 if (did_split || !folio_test_partially_mapped(folio))
4577 continue;
4578 requeue:
4579 /*
4580 * Add back partially mapped folios, or underused folios that
4581 * we could not lock this round.
4582 */
4583 fqueue = folio_split_queue_lock_irqsave(folio, &flags);
4584 if (list_empty(&folio->_deferred_list)) {
4585 list_add_tail(&folio->_deferred_list, &fqueue->split_queue);
4586 fqueue->split_queue_len++;
4587 }
4588 split_queue_unlock_irqrestore(fqueue, flags);
4589 }
4590 folios_put(&fbatch);
4591
4592 if (sc->nr_to_scan && !list_empty(&ds_queue->split_queue)) {
4593 cond_resched();
4594 goto retry;
4595 }
4596
4597 /*
4598 * Stop shrinker if we didn't split any page, but the queue is empty.
4599 * This can happen if pages were freed under us.
4600 */
4601 if (!split && list_empty(&ds_queue->split_queue))
4602 return SHRINK_STOP;
4603 return split;
4604 }
4605
4606 #ifdef CONFIG_MEMCG
4607 void reparent_deferred_split_queue(struct mem_cgroup *memcg)
4608 {
4609 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4610 struct deferred_split *ds_queue = &memcg->deferred_split_queue;
4611 struct deferred_split *parent_ds_queue = &parent->deferred_split_queue;
4612 int nid;
4613
4614 spin_lock_irq(&ds_queue->split_queue_lock);
4615 spin_lock_nested(&parent_ds_queue->split_queue_lock, SINGLE_DEPTH_NESTING);
4616
4617 if (!ds_queue->split_queue_len)
4618 goto unlock;
4619
4620 list_splice_tail_init(&ds_queue->split_queue, &parent_ds_queue->split_queue);
4621 parent_ds_queue->split_queue_len += ds_queue->split_queue_len;
4622 ds_queue->split_queue_len = 0;
4623
4624 for_each_node(nid)
4625 set_shrinker_bit(parent, nid, shrinker_id(deferred_split_shrinker));
4626
4627 unlock:
4628 spin_unlock(&parent_ds_queue->split_queue_lock);
4629 spin_unlock_irq(&ds_queue->split_queue_lock);
4630 }
4631 #endif
4632
4633 #ifdef CONFIG_DEBUG_FS
4634 static void split_huge_pages_all(void)
4635 {
4636 struct zone *zone;
4637 struct page *page;
4638 struct folio *folio;
4639 unsigned long pfn, max_zone_pfn;
4640 unsigned long total = 0, split = 0;
4641
4642 pr_debug("Split all THPs\n");
4643 for_each_zone(zone) {
4644 if (!managed_zone(zone))
4645 continue;
4646 max_zone_pfn = zone_end_pfn(zone);
4647 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
4648 int nr_pages;
4649
4650 page = pfn_to_online_page(pfn);
4651 if (!page || PageTail(page))
4652 continue;
4653 folio = page_folio(page);
4654 if (!folio_try_get(folio))
4655 continue;
4656
4657 if (unlikely(page_folio(page) != folio))
4658 goto next;
4659
4660 if (zone != folio_zone(folio))
4661 goto next;
4662
4663 if (!folio_test_large(folio)
4664 || folio_test_hugetlb(folio)
4665 || !folio_test_lru(folio))
4666 goto next;
4667
4668 total++;
4669 folio_lock(folio);
4670 nr_pages = folio_nr_pages(folio);
4671 if (!split_folio(folio))
4672 split++;
4673 pfn += nr_pages - 1;
4674 folio_unlock(folio);
4675 next:
4676 folio_put(folio);
4677 cond_resched();
4678 }
4679 }
4680
4681 pr_debug("%lu of %lu THP split\n", split, total);
4682 }
4683
4684 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
4685 {
4686 if (vma_is_dax(vma))
4687 return true;
4688 if (vma_is_special_huge(vma))
4689 return true;
4690 if (vma_test(vma, VMA_IO_BIT))
4691 return true;
4692 if (is_vm_hugetlb_page(vma))
4693 return true;
4694
4695 return false;
4696 }
4697
4698 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
4699 unsigned long vaddr_end, unsigned int new_order,
4700 long in_folio_offset)
4701 {
4702 int ret = 0;
4703 struct task_struct *task;
4704 struct mm_struct *mm;
4705 unsigned long total = 0, split = 0;
4706 unsigned long addr;
4707
4708 vaddr_start &= PAGE_MASK;
4709 vaddr_end &= PAGE_MASK;
4710
4711 task = find_get_task_by_vpid(pid);
4712 if (!task) {
4713 ret = -ESRCH;
4714 goto out;
4715 }
4716
4717 /* Find the mm_struct */
4718 mm = get_task_mm(task);
4719 put_task_struct(task);
4720
4721 if (!mm) {
4722 ret = -EINVAL;
4723 goto out;
4724 }
4725
4726 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
4727 pid, vaddr_start, vaddr_end, new_order, in_folio_offset);
4728
4729 mmap_read_lock(mm);
4730 /*
4731 * always increase addr by PAGE_SIZE, since we could have a PTE page
4732 * table filled with PTE-mapped THPs, each of which is distinct.
4733 */
4734 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
4735 struct vm_area_struct *vma = vma_lookup(mm, addr);
4736 struct folio_walk fw;
4737 struct folio *folio;
4738 struct address_space *mapping;
4739 unsigned int target_order = new_order;
4740
4741 if (!vma)
4742 break;
4743
4744 /* skip special VMA and hugetlb VMA */
4745 if (vma_not_suitable_for_thp_split(vma)) {
4746 addr = vma->vm_end;
4747 continue;
4748 }
4749
4750 folio = folio_walk_start(&fw, vma, addr, 0);
4751 if (!folio)
4752 continue;
4753
4754 if (!is_transparent_hugepage(folio))
4755 goto next;
4756
4757 if (!folio_test_anon(folio)) {
4758 mapping = folio->mapping;
4759 target_order = max(new_order,
4760 mapping_min_folio_order(mapping));
4761 }
4762
4763 if (target_order >= folio_order(folio))
4764 goto next;
4765
4766 total++;
4767 /*
4768 * For folios with private data, split_huge_page_to_list_to_order()
4769 * will try to drop it before split and then check if the folio
4770 * can be split or not. So skip the check here.
4771 */
4772 if (!folio_test_private(folio) &&
4773 folio_expected_ref_count(folio) != folio_ref_count(folio))
4774 goto next;
4775
4776 if (!folio_trylock(folio))
4777 goto next;
4778 folio_get(folio);
4779 folio_walk_end(&fw, vma);
4780
4781 if (!folio_test_anon(folio) && folio->mapping != mapping)
4782 goto unlock;
4783
4784 if (in_folio_offset < 0 ||
4785 in_folio_offset >= folio_nr_pages(folio)) {
4786 if (!split_folio_to_order(folio, target_order))
4787 split++;
4788 } else {
4789 struct page *split_at = folio_page(folio,
4790 in_folio_offset);
4791 if (!folio_split(folio, target_order, split_at, NULL))
4792 split++;
4793 }
4794
4795 unlock:
4796
4797 folio_unlock(folio);
4798 folio_put(folio);
4799
4800 cond_resched();
4801 continue;
4802 next:
4803 folio_walk_end(&fw, vma);
4804 cond_resched();
4805 }
4806 mmap_read_unlock(mm);
4807 mmput(mm);
4808
4809 pr_debug("%lu of %lu THP split\n", split, total);
4810
4811 out:
4812 return ret;
4813 }
4814
4815 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
4816 pgoff_t off_end, unsigned int new_order,
4817 long in_folio_offset)
4818 {
4819 struct file *candidate;
4820 struct address_space *mapping;
4821 pgoff_t index;
4822 int nr_pages = 1;
4823 unsigned long total = 0, split = 0;
4824 unsigned int min_order;
4825 unsigned int target_order;
4826
4827 CLASS(filename_kernel, file)(file_path);
4828 candidate = file_open_name(file, O_RDONLY, 0);
4829 if (IS_ERR(candidate))
4830 return -EINVAL;
4831
4832 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
4833 file_path, off_start, off_end, new_order, in_folio_offset);
4834
4835 mapping = candidate->f_mapping;
4836 min_order = mapping_min_folio_order(mapping);
4837 target_order = max(new_order, min_order);
4838
4839 for (index = off_start; index < off_end; index += nr_pages) {
4840 struct folio *folio = filemap_get_folio(mapping, index);
4841
4842 nr_pages = 1;
4843 if (IS_ERR(folio))
4844 continue;
4845
4846 if (!folio_test_large(folio))
4847 goto next;
4848
4849 total++;
4850 nr_pages = folio_nr_pages(folio);
4851
4852 if (target_order >= folio_order(folio))
4853 goto next;
4854
4855 if (!folio_trylock(folio))
4856 goto next;
4857
4858 if (folio->mapping != mapping)
4859 goto unlock;
4860
4861 if (in_folio_offset < 0 || in_folio_offset >= nr_pages) {
4862 if (!split_folio_to_order(folio, target_order))
4863 split++;
4864 } else {
4865 struct page *split_at = folio_page(folio,
4866 in_folio_offset);
4867 if (!folio_split(folio, target_order, split_at, NULL))
4868 split++;
4869 }
4870
4871 unlock:
4872 folio_unlock(folio);
4873 next:
4874 folio_put(folio);
4875 cond_resched();
4876 }
4877
4878 filp_close(candidate, NULL);
4879 pr_debug("%lu of %lu file-backed THP split\n", split, total);
4880 return 0;
4881 }
4882
4883 #define MAX_INPUT_BUF_SZ 255
4884
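/*
 * Input format (illustrative examples; <debugfs> is typically mounted at
 * /sys/kernel/debug).  Addresses and offsets are parsed as hex and the
 * pid, new_order and in_folio_offset as decimal, matching the sscanf()
 * formats below:
 *
 *	echo 1 > <debugfs>/split_huge_pages
 *		split every THP system-wide
 *	echo "<pid>,0x<vaddr_start>,0x<vaddr_end>[,<new_order>[,<in_folio_offset>]]" \
 *		> <debugfs>/split_huge_pages
 *		split THPs mapped into that range of the given process
 *	echo "/path/to/file,0x<off_start>,0x<off_end>[,<new_order>[,<in_folio_offset>]]" \
 *		> <debugfs>/split_huge_pages
 *		split file-backed THPs in that page-offset range
 */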
4885 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
4886 size_t count, loff_t *ppops)
4887 {
4888 static DEFINE_MUTEX(split_debug_mutex);
4889 ssize_t ret;
4890 /*
4891 * holds "pid,start_vaddr,end_vaddr[,new_order[,in_folio_offset]]" or
4892 * "file_path,off_start,off_end[,new_order[,in_folio_offset]]"
4893 */
4894 char input_buf[MAX_INPUT_BUF_SZ];
4895 int pid;
4896 unsigned long vaddr_start, vaddr_end;
4897 unsigned int new_order = 0;
4898 long in_folio_offset = -1;
4899
4900 ret = mutex_lock_interruptible(&split_debug_mutex);
4901 if (ret)
4902 return ret;
4903
4904 ret = -EFAULT;
4905
4906 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
4907 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
4908 goto out;
4909
4910 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
4911
4912 if (input_buf[0] == '/') {
4913 char *tok;
4914 char *tok_buf = input_buf;
4915 char file_path[MAX_INPUT_BUF_SZ];
4916 pgoff_t off_start = 0, off_end = 0;
4917 size_t input_len = strlen(input_buf);
4918
4919 tok = strsep(&tok_buf, ",");
4920 if (tok && tok_buf) {
4921 strscpy(file_path, tok);
4922 } else {
4923 ret = -EINVAL;
4924 goto out;
4925 }
4926
4927 ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end,
4928 &new_order, &in_folio_offset);
4929 if (ret != 2 && ret != 3 && ret != 4) {
4930 ret = -EINVAL;
4931 goto out;
4932 }
4933 ret = split_huge_pages_in_file(file_path, off_start, off_end,
4934 new_order, in_folio_offset);
4935 if (!ret)
4936 ret = input_len;
4937
4938 goto out;
4939 }
4940
4941 ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start,
4942 &vaddr_end, &new_order, &in_folio_offset);
4943 if (ret == 1 && pid == 1) {
4944 split_huge_pages_all();
4945 ret = strlen(input_buf);
4946 goto out;
4947 } else if (ret != 3 && ret != 4 && ret != 5) {
4948 ret = -EINVAL;
4949 goto out;
4950 }
4951
4952 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order,
4953 in_folio_offset);
4954 if (!ret)
4955 ret = strlen(input_buf);
4956 out:
4957 mutex_unlock(&split_debug_mutex);
4958 return ret;
4960 }
4961
4962 static const struct file_operations split_huge_pages_fops = {
4963 .owner = THIS_MODULE,
4964 .write = split_huge_pages_write,
4965 };
4966
4967 static int __init split_huge_pages_debugfs(void)
4968 {
4969 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
4970 &split_huge_pages_fops);
4971 return 0;
4972 }
4973 late_initcall(split_huge_pages_debugfs);
4974 #endif
4975
4976 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
4977 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
4978 struct page *page)
4979 {
4980 struct folio *folio = page_folio(page);
4981 struct vm_area_struct *vma = pvmw->vma;
4982 struct mm_struct *mm = vma->vm_mm;
4983 unsigned long address = pvmw->address;
4984 bool anon_exclusive;
4985 pmd_t pmdval;
4986 swp_entry_t entry;
4987 pmd_t pmdswp;
4988
4989 if (!(pvmw->pmd && !pvmw->pte))
4990 return 0;
4991
4992 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
4993 if (unlikely(!pmd_present(*pvmw->pmd)))
4994 pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd);
4995 else
4996 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
4997
4998 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
4999 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
5000 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
5001 set_pmd_at(mm, address, pvmw->pmd, pmdval);
5002 return -EBUSY;
5003 }
5004
5005 if (pmd_dirty(pmdval))
5006 folio_mark_dirty(folio);
5007 if (pmd_write(pmdval))
5008 entry = make_writable_migration_entry(page_to_pfn(page));
5009 else if (anon_exclusive)
5010 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
5011 else
5012 entry = make_readable_migration_entry(page_to_pfn(page));
5013 if (pmd_young(pmdval))
5014 entry = make_migration_entry_young(entry);
5015 if (pmd_dirty(pmdval))
5016 entry = make_migration_entry_dirty(entry);
5017 pmdswp = swp_entry_to_pmd(entry);
5018 if (pmd_soft_dirty(pmdval))
5019 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
5020 if (pmd_uffd_wp(pmdval))
5021 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
5022 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
5023 folio_remove_rmap_pmd(folio, page, vma);
5024 folio_put(folio);
5025 trace_set_migration_pmd(address, pmd_val(pmdswp));
5026
5027 return 0;
5028 }
5029
5030 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
5031 {
5032 struct folio *folio = page_folio(new);
5033 struct vm_area_struct *vma = pvmw->vma;
5034 struct mm_struct *mm = vma->vm_mm;
5035 unsigned long address = pvmw->address;
5036 unsigned long haddr = address & HPAGE_PMD_MASK;
5037 pmd_t pmde;
5038 softleaf_t entry;
5039
5040 if (!(pvmw->pmd && !pvmw->pte))
5041 return;
5042
5043 entry = softleaf_from_pmd(*pvmw->pmd);
5044 folio_get(folio);
5045 pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
5046
5047 if (pmd_swp_soft_dirty(*pvmw->pmd))
5048 pmde = pmd_mksoft_dirty(pmde);
5049 if (softleaf_is_migration_write(entry))
5050 pmde = pmd_mkwrite(pmde, vma);
5051 if (pmd_swp_uffd_wp(*pvmw->pmd))
5052 pmde = pmd_mkuffd_wp(pmde);
5053 if (!softleaf_is_migration_young(entry))
5054 pmde = pmd_mkold(pmde);
5055 /* NOTE: this may contain setting soft-dirty on some archs */
5056 if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
5057 pmde = pmd_mkdirty(pmde);
5058
5059 if (folio_is_device_private(folio)) {
5060 swp_entry_t entry;
5061
5062 if (pmd_write(pmde))
5063 entry = make_writable_device_private_entry(
5064 page_to_pfn(new));
5065 else
5066 entry = make_readable_device_private_entry(
5067 page_to_pfn(new));
5068 pmde = swp_entry_to_pmd(entry);
5069
5070 if (pmd_swp_soft_dirty(*pvmw->pmd))
5071 pmde = pmd_swp_mksoft_dirty(pmde);
5072 if (pmd_swp_uffd_wp(*pvmw->pmd))
5073 pmde = pmd_swp_mkuffd_wp(pmde);
5074 }
5075
5076 if (folio_test_anon(folio)) {
5077 rmap_t rmap_flags = RMAP_NONE;
5078
5079 if (!softleaf_is_migration_read(entry))
5080 rmap_flags |= RMAP_EXCLUSIVE;
5081
5082 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
5083 } else {
5084 folio_add_file_rmap_pmd(folio, new, vma);
5085 }
5086 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
5087 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
5088
5089 /* No need to invalidate - it was non-present before */
5090 update_mmu_cache_pmd(vma, address, pvmw->pmd);
5091 trace_remove_migration_pmd(address, pmd_val(pmde));
5092 }
5093 #endif
5094