// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate_wait.h>
#include <linux/leafops.h>
#include <linux/shmem_fs.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/pgalloc.h>
#include <linux/backing-dev.h>

#include <asm/tlb.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_NO_PTE_TABLE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
	SCAN_PAGE_FILLED,
	SCAN_PAGE_DIRTY_OR_WRITEBACK,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*HPAGE_PMD_NR ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped,
 * as would have happened at page-fault time had the vma been large
 * enough.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __ro_after_init;

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm_slot to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t __sleep_millisecs_store(const char *buf, size_t count,
				       unsigned int *millisecs)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	*millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	return __sleep_millisecs_store(buf, count, &khugepaged_scan_sleep_millisecs);
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	return __sleep_millisecs_store(buf, count, &khugepaged_alloc_sleep_millisecs);
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

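/*
 * Treat a pte as "empty" for collapse accounting if it is either truly
 * pte_none() or maps the shared zero page; both are counted against
 * max_ptes_none rather than as mapped pages.
 */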
static bool pte_none_or_zero(pte_t pte)
{
	if (pte_none(pte))
		return true;
	return pte_present(pte) && is_zero_pfn(pte_pfn(pte));
}

int hugepage_madvise(struct vm_area_struct *vma,
		     vm_flags_t *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

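/*
 * The mm is treated as exiting once all mm_users references are gone;
 * khugepaged itself only holds an mm_count reference (via mmgrab() in
 * __khugepaged_enter()), so mm_users == 0 means no task can fault on
 * this mm anymore.
 */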
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
{
	return hpage_collapse_test_exit(mm) ||
	       mm_flags_test(MMF_DISABLE_THP_COMPLETELY, mm);
}

static bool hugepage_pmd_enabled(void)
{
	/*
	 * We cover the anon, shmem and the file-backed case here; file-backed
	 * hugepages, when configured in, are determined by the global control.
	 * Anon pmd-sized hugepages are determined by the pmd-size control.
	 * Shmem pmd-sized hugepages are also determined by its pmd-size control,
	 * except when the global shmem_huge is set to SHMEM_HUGE_DENY.
	 */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
	    hugepage_global_enabled())
		return true;
	if (test_bit(PMD_ORDER, &huge_anon_orders_always))
		return true;
	if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
		return true;
	if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
	    hugepage_global_enabled())
		return true;
	if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
		return true;
	return false;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *slot;
	int wakeup;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
		return;

	slot = mm_slot_alloc(mm_slot_cache);
	if (!slot)
		return;

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  vm_flags_t vm_flags)
{
	if (!mm_flags_test(MMF_VM_HUGEPAGE, vma->vm_mm) &&
	    hugepage_pmd_enabled()) {
		if (thp_vma_allowable_order(vma, vm_flags, TVA_KHUGEPAGED, PMD_ORDER))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	if (slot && khugepaged_scan.mm_slot != slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		mm_flags_clear(MMF_VM_HUGEPAGE, mm);
		mm_slot_free(mm_slot_cache, slot);
		mmdrop(mm);
	} else if (slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

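/*
 * Undo the isolation done in __collapse_huge_page_isolate(): drop the
 * NR_ISOLATED accounting, unlock the folio and put it back on its LRU list.
 */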
static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

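/*
 * Walk back from _pte to pte, releasing every small folio that was already
 * isolated; large folios are tracked on @compound_pagelist and are released
 * from there instead.
 */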
static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = ptep_get(_pte);
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		VM_WARN_ON_ONCE(!pte_present(pteval));
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
		unsigned long start_addr, pte_t *pte, struct collapse_control *cc,
		struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	struct folio *folio = NULL;
	unsigned long addr = start_addr;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, referenced = 0;
	enum scan_result result = SCAN_FAIL;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, addr += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (pte_none_or_zero(pteval)) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, addr, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		folio = page_folio(page);
		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);

		/* See hpage_collapse_scan_pmd(). */
		if (folio_maybe_mapped_shared(folio)) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (folio_test_large(folio)) {
			struct folio *f;

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(f, compound_pagelist, lru) {
				if (folio == f)
					goto next;
			}
		}

		/*
		 * We can do it before folio_isolate_lru because the
		 * folio can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!folio_trylock(folio)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (folio_expected_ref_count(folio) != folio_ref_count(folio)) {
			folio_unlock(folio);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!folio_isolate_lru(folio)) {
			folio_unlock(folio);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		node_stat_mod_folio(folio,
				NR_ISOLATED_ANON + folio_is_file_lru(folio),
				folio_nr_pages(folio));
		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

		if (folio_test_large(folio))
			list_add_tail(&folio->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there are
		 * enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || folio_test_young(folio) ||
		     folio_test_referenced(folio) ||
		     mmu_notifier_test_young(vma->vm_mm, addr)))
			referenced++;
	}

	if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(folio, none_or_zero,
						    referenced, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(folio, none_or_zero,
					    referenced, result);
	return result;
}

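/*
 * After the contents have been copied into the new hugepage, tear down the
 * old mappings: clear the PTEs, drop the rmap references and refcounts on
 * the source folios, and put the isolated folios back on the LRU.
 */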
static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	unsigned long end = address + HPAGE_PMD_SIZE;
	struct folio *src, *tmp;
	pte_t pteval;
	pte_t *_pte;
	unsigned int nr_ptes;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte += nr_ptes,
	     address += nr_ptes * PAGE_SIZE) {
		nr_ptes = 1;
		pteval = ptep_get(_pte);
		if (pte_none_or_zero(pteval)) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (pte_none(pteval))
				continue;
			/*
			 * ptl mostly unnecessary.
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			spin_unlock(ptl);
			ksm_might_unmap_zero_page(vma->vm_mm, pteval);
		} else {
			struct page *src_page = pte_page(pteval);

			src = page_folio(src_page);

			if (folio_test_large(src)) {
				unsigned int max_nr_ptes = (end - address) >> PAGE_SHIFT;

				nr_ptes = folio_pte_batch(src, _pte, pteval, max_nr_ptes);
			} else {
				release_pte_folio(src);
			}

			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside folio_remove_rmap_pte().
			 */
			spin_lock(ptl);
			clear_ptes(vma->vm_mm, address, _pte, nr_ptes);
			folio_remove_rmap_ptes(src, src_page, nr_ptes, vma);
			spin_unlock(ptl);
			free_swap_cache(src);
			folio_put_refs(src, nr_ptes);
		}
	}

	list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {
		list_del(&src->lru);
		node_stat_sub_folio(src, NR_ISOLATED_ANON +
				folio_is_file_lru(src));
		folio_unlock(src);
		free_swap_cache(src);
		folio_putback_lru(src);
	}
}

static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @folio: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static enum scan_result __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
		pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
		unsigned long address, spinlock_t *ptl,
		struct list_head *compound_pagelist)
{
	unsigned int i;
	enum scan_result result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pte_t pteval = ptep_get(pte + i);
		struct page *page = folio_page(folio, i);
		unsigned long src_addr = address + i * PAGE_SIZE;
		struct page *src_page;

		if (pte_none_or_zero(pteval)) {
			clear_user_highpage(page, src_addr);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

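/*
 * Back off for alloc_sleep_millisecs after a hugepage allocation failure,
 * polling the allocator only occasionally while memory stays fragmented.
 */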
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

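/*
 * Abort the scan if allocating the hugepage on @nid would place it further
 * than node_reclaim_distance from a node we have already scanned pages on;
 * only relevant when node reclaim is enabled.
 */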
static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
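/*
 * Pick the node that the most scanned pages came from; any node whose hit
 * count ties with the maximum is also added to the allocation fallback mask.
 */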
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma after
 * re-acquiring the lock, before touching the address range again.
 * Returns an enum scan_result value.
 */
static enum scan_result hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		bool expect_anon, struct vm_area_struct **vmap, struct collapse_control *cc)
{
	struct vm_area_struct *vma;
	enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
			     TVA_FORCED_COLLAPSE;

	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
		return SCAN_ADDRESS_RANGE;
	if (!thp_vma_allowable_order(vma, vma->vm_flags, type, PMD_ORDER))
		return SCAN_VMA_CHECK;
	/*
	 * An anon VMA is expected, but the address may have been unmapped
	 * and then remapped to a file after khugepaged re-acquired the
	 * mmap_lock.
	 *
	 * thp_vma_allowable_order may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

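/*
 * Classify the pmd: SCAN_SUCCEED if it points to a page table whose PTEs
 * can be scanned; SCAN_PMD_MAPPED if it already maps a huge folio (or is a
 * migration entry that will again); SCAN_NO_PTE_TABLE otherwise.
 */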
static inline enum scan_result check_pmd_state(pmd_t *pmd)
{
	pmd_t pmde = pmdp_get_lockless(pmd);

	if (pmd_none(pmde))
		return SCAN_NO_PTE_TABLE;

	/*
	 * The folio may be under migration when khugepaged is trying to
	 * collapse it. Migration success or failure will eventually end
	 * up with a present PMD mapping a folio again.
	 */
	if (pmd_is_migration_entry(pmde))
		return SCAN_PMD_MAPPED;
	if (!pmd_present(pmde))
		return SCAN_NO_PTE_TABLE;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_bad(pmde))
		return SCAN_NO_PTE_TABLE;
	return SCAN_SUCCEED;
}

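/*
 * Locate the pmd covering @address and classify its state via
 * check_pmd_state(); *pmd may be NULL if no page table exists there yet.
 */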
static enum scan_result find_pmd_or_thp_or_none(struct mm_struct *mm,
		unsigned long address, pmd_t **pmd)
{
	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_NO_PTE_TABLE;

	return check_pmd_state(*pmd);
}

static enum scan_result check_pmd_still_valid(struct mm_struct *mm,
		unsigned long address, pmd_t *pmd)
{
	pmd_t *new_pmd;
	enum scan_result result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
 */
static enum scan_result __collapse_huge_page_swapin(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long start_addr, pmd_t *pmd,
		int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
	enum scan_result result;
	pte_t *pte = NULL;
	spinlock_t *ptl;

	for (addr = start_addr; addr < end; addr += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = addr,
			.pgoff = linear_page_index(vma, addr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		if (!pte++) {
			/*
			 * Here the ptl is only used to check pte_same() in
			 * do_swap_page(), so readonly version is enough.
			 */
			pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
			if (!pte) {
				mmap_read_unlock(mm);
				result = SCAN_NO_PTE_TABLE;
				goto out;
			}
		}

		vmf.orig_pte = ptep_get_lockless(pte);
		if (pte_none(vmf.orig_pte) ||
		    pte_present(vmf.orig_pte))
			continue;

		vmf.pte = pte;
		vmf.ptl = ptl;
		ret = do_swap_page(&vmf);
		/* Which unmaps pte (after perhaps re-checking the entry) */
		pte = NULL;

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here, and the swap entry will remain in the
		 * pagetable, resulting in a later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			/* Likely, but not guaranteed, that page lock failed */
			result = SCAN_PAGE_LOCK;
			goto out;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			result = SCAN_FAIL;
			goto out;
		}
		swapped_in++;
	}

	if (pte)
		pte_unmap(pte);

	/* Drain LRU cache to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	result = SCAN_SUCCEED;
out:
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
	return result;
}

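/*
 * Allocate the destination hugepage on the preferred node and charge it to
 * @mm's memcg; on failure, *foliop is set to NULL and the corresponding
 * SCAN_* error is returned.
 */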
static enum scan_result alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
		struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
	if (!folio) {
		*foliop = NULL;
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return SCAN_ALLOC_HUGE_PAGE_FAIL;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*foliop = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}

	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);

	*foliop = folio;
	return SCAN_SUCCEED;
}

static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long address,
		int referenced, int unmapped, struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct folio *folio;
	spinlock_t *pmd_ptl, *pte_ptl;
	enum scan_result result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_folio(&folio, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 *
	 * UFFDIO_MOVE is prevented from racing as well thanks to the
	 * mmap_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	vma_start_write(vma);
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel GUP-fast is fine since GUP-fast will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
	if (pte) {
		result = __collapse_huge_page_isolate(vma, address, pte, cc,
						      &compound_pagelist);
		spin_unlock(pte_ptl);
	} else {
		result = SCAN_NO_PTE_TABLE;
	}

	if (unlikely(result != SCAN_SUCCEED)) {
		if (pte)
			pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	/*
	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
	 * copy_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);
	pgtable = pmd_pgtable(_pmd);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	map_anon_folio_pmd_nopf(folio, pmd, vma, address);
	spin_unlock(pmd_ptl);

	folio = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (folio)
		folio_put(folio);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long start_addr, bool *mmap_locked,
		struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int none_or_zero = 0, shared = 0, referenced = 0;
	enum scan_result result = SCAN_FAIL;
	struct page *page = NULL;
	struct folio *folio = NULL;
	unsigned long addr;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;

	VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
	if (!pte) {
		result = SCAN_NO_PTE_TABLE;
		goto out;
	}

	for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, addr += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (pte_none_or_zero(pteval)) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp_any(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}

		page = vm_normal_page(vma, addr, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}
		folio = page_folio(page);

		if (!folio_test_anon(folio)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * We treat a single page as shared if any part of the THP
		 * is shared.
		 */
		if (folio_maybe_mapped_shared(folio)) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = folio_nid(folio);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!folio_test_lru(folio)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (folio_test_locked(folio)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check may be racy:
		 * it may see folio_mapcount() > folio_ref_count().
		 * But such a case is ephemeral; we could always retry the
		 * collapse later. However, it may report a false positive
		 * if the page has excessive GUP pins (e.g. 512). Anyway, the
		 * same check will be done again later, so the risk seems low.
		 */
		if (folio_expected_ref_count(folio) != folio_ref_count(folio)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there are
		 * enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || folio_test_young(folio) ||
		     folio_test_referenced(folio) ||
		     mmu_notifier_test_young(vma->vm_mm, addr)))
			referenced++;
	}
	if (cc->is_khugepaged &&
	    (!referenced ||
	     (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, start_addr, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, folio, referenced,
				     none_or_zero, result, unmapped);
	return result;
}

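/*
 * Drop an mm from the scan list once it has exited: free its mm_slot and
 * release the mm_count reference taken in __khugepaged_enter(). Must be
 * called with khugepaged_mm_lock held.
 */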
static void collect_mm_slot(struct mm_slot *slot)
{
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * mm_flags_clear(MMF_VM_HUGEPAGE, mm);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		mm_slot_free(mm_slot_cache, slot);
		mmdrop(mm);
	}
}

/* folio must be locked, and mmap_lock must be held */
static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmdp, struct folio *folio, struct page *page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_fault vmf = {
		.vma = vma,
		.address = addr,
		.flags = 0,
	};
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;

	mmap_assert_locked(vma->vm_mm);

	if (!pmdp) {
		pgdp = pgd_offset(mm, addr);
		p4dp = p4d_alloc(mm, pgdp, addr);
		if (!p4dp)
			return SCAN_FAIL;
		pudp = pud_alloc(mm, p4dp, addr);
		if (!pudp)
			return SCAN_FAIL;
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return SCAN_FAIL;
	}

	vmf.pmd = pmdp;
	if (do_set_pmd(&vmf, folio, page))
		return SCAN_FAIL;

	folio_get(folio);
	return SCAN_SUCCEED;
}

static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
		bool install_pmd)
{
	enum scan_result result = SCAN_FAIL;
	int nr_mapped_ptes = 0;
	unsigned int nr_batch_ptes;
	struct mmu_notifier_range range;
	bool notified = false;
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	unsigned long end = haddr + HPAGE_PMD_SIZE;
	struct vm_area_struct *vma = vma_lookup(mm, haddr);
	struct folio *folio;
	pte_t *start_pte, *pte;
	pmd_t *pmd, pgt_pmd;
	spinlock_t *pml = NULL, *ptl;
	int i;

	mmap_assert_locked(mm);

	/* First check VMA found, in case page tables are being torn down */
	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return SCAN_VMA_CHECK;

	/* Fast check before locking page if already PMD-mapped */
	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	if (result == SCAN_PMD_MAPPED)
		return result;

	/*
	 * If we are here, we've succeeded in replacing all the native pages
	 * in the page cache with a single hugepage. If a mm were to fault-in
	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
	 * analogously elide sysfs THP settings here and force collapse.
	 */
	if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, PMD_ORDER))
		return SCAN_VMA_CHECK;

	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
	if (userfaultfd_wp(vma))
		return SCAN_PTE_UFFD_WP;

	folio = filemap_lock_folio(vma->vm_file->f_mapping,
			linear_page_index(vma, haddr));
	if (IS_ERR(folio))
		return SCAN_PAGE_NULL;

	if (folio_order(folio) != HPAGE_PMD_ORDER) {
		result = SCAN_PAGE_COMPOUND;
		goto drop_folio;
	}

	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	switch (result) {
	case SCAN_SUCCEED:
		break;
	case SCAN_NO_PTE_TABLE:
		/*
		 * All pte entries have been removed and pmd cleared.
		 * Skip all the pte checks and just update the pmd mapping.
		 */
		goto maybe_install_pmd;
	default:
		goto drop_folio;
	}

	result = SCAN_FAIL;
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
		goto drop_folio;

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		/* empty pte, skip */
		if (pte_none(ptent))
			continue;

		/* page swapped out, abort */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			page = NULL;
		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (folio_page(folio, i) != page)
			goto abort;
	}

	pte_unmap_unlock(start_pte, ptl);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				haddr, haddr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	notified = true;

	/*
	 * pmd_lock covers a wider range than ptl, and (if split from mm's
	 * page_table_lock) ptl nests inside pml. The less time we hold pml,
	 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
	 * inserts a valid as-if-COWed PTE without even looking up page cache.
	 * So page lock of folio does not protect from it, so we must not drop
	 * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
	 */
	if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
		pml = pmd_lock(mm, pmd);

	start_pte = pte_offset_map_rw_nolock(mm, pmd, haddr, &pgt_pmd, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
		goto abort;
	if (!pml)
		spin_lock(ptl);
	else if (ptl != pml)
		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

	if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd))))
		goto abort;

	/* step 2: clear page table and adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR;
	     i += nr_batch_ptes, addr += nr_batch_ptes * PAGE_SIZE,
	     pte += nr_batch_ptes) {
		unsigned int max_nr_batch_ptes = (end - addr) >> PAGE_SHIFT;
		struct page *page;
		pte_t ptent = ptep_get(pte);

		nr_batch_ptes = 1;

		if (pte_none(ptent))
			continue;
		/*
		 * We dropped ptl after the first scan, to do the mmu_notifier:
		 * page lock stops more PTEs of the folio being faulted in, but
		 * does not stop write faults COWing anon copies from existing
		 * PTEs; and does not stop those being swapped out or migrated.
		 */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}
		page = vm_normal_page(vma, addr, ptent);

		if (folio_page(folio, i) != page)
			goto abort;

		nr_batch_ptes = folio_pte_batch(folio, pte, ptent, max_nr_batch_ptes);

		/*
		 * Must clear entry, or a racing truncate may re-remove it.
		 * TLB flush can be left until pmdp_collapse_flush() does it.
		 * PTE dirty? Shmem page is already dirty; file is read-only.
		 */
		clear_ptes(mm, addr, pte, nr_batch_ptes);
		folio_remove_rmap_ptes(folio, page, nr_batch_ptes, vma);
		nr_mapped_ptes += nr_batch_ptes;
	}

	if (!pml)
		spin_unlock(ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (nr_mapped_ptes) {
		folio_ref_sub(folio, nr_mapped_ptes);
		add_mm_counter(mm, mm_counter_file(folio), -nr_mapped_ptes);
	}

	/* step 4: remove empty page table */
	if (!pml) {
		pml = pmd_lock(mm, pmd);
		if (ptl != pml) {
			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
			if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd)))) {
				flush_tlb_mm(mm);
				goto unlock;
			}
		}
	}
	pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	pmdp_get_lockless_sync();
	pte_unmap_unlock(start_pte, ptl);
	if (ptl != pml)
		spin_unlock(pml);

	mmu_notifier_invalidate_range_end(&range);

	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
	pte_free_defer(mm, pmd_pgtable(pgt_pmd));

maybe_install_pmd:
	/* step 5: install pmd entry */
	result = install_pmd
		? set_huge_pmd(vma, haddr, pmd, folio, &folio->page)
		: SCAN_SUCCEED;
	goto drop_folio;
abort:
	if (nr_mapped_ptes) {
		flush_tlb_mm(mm);
		folio_ref_sub(folio, nr_mapped_ptes);
		add_mm_counter(mm, mm_counter_file(folio), -nr_mapped_ptes);
	}
unlock:
	if (start_pte)
		pte_unmap_unlock(start_pte, ptl);
	if (pml && pml != ptl)
		spin_unlock(pml);
	if (notified)
		mmu_notifier_invalidate_range_end(&range);
drop_folio:
	folio_unlock(folio);
	folio_put(folio);
	return result;
}

/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 * address @addr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 * @install_pmd: If a huge PMD should be installed
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped. Possibly install a huge PMD mapping the THP.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
			     bool install_pmd)
{
	try_collapse_pte_mapped_thp(mm, addr, install_pmd);
}
1703
1704 /* Can we retract page tables for this file-backed VMA? */
file_backed_vma_is_retractable(struct vm_area_struct * vma)1705 static bool file_backed_vma_is_retractable(struct vm_area_struct *vma)
1706 {
1707 /*
1708 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1709 * got written to. These VMAs are likely not worth removing
1710 * page tables from, as PMD-mapping is likely to be split later.
1711 */
1712 if (READ_ONCE(vma->anon_vma))
1713 return false;
1714
1715 /*
1716 * When a vma is registered with uffd-wp, we cannot recycle
1717 * the page table because there may be pte markers installed.
1718 * Other vmas can still have the same file mapped hugely, but
1719 * skip this one: it will always be mapped in small page size
1720 * for uffd-wp registered ranges.
1721 */
1722 if (userfaultfd_wp(vma))
1723 return false;
1724
1725 /*
1726 * If the VMA contains guard regions then we can't collapse it.
1727 *
1728 * This is set atomically on guard marker installation under mmap/VMA
1729 * read lock, and here we may not hold any VMA or mmap lock at all.
1730 *
1731 * This is therefore serialised on the PTE page table lock, which is
1732 * obtained on guard region installation after the flag is set, so this
1733 * check being performed under this lock excludes races.
1734 */
1735 if (vma_test_atomic_flag(vma, VMA_MAYBE_GUARD_BIT))
1736 return false;
1737
1738 return true;
1739 }
1740
retract_page_tables(struct address_space * mapping,pgoff_t pgoff)1741 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1742 {
1743 struct vm_area_struct *vma;
1744
1745 i_mmap_lock_read(mapping);
1746 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1747 struct mmu_notifier_range range;
1748 struct mm_struct *mm;
1749 unsigned long addr;
1750 pmd_t *pmd, pgt_pmd;
1751 spinlock_t *pml;
1752 spinlock_t *ptl;
1753 bool success = false;
1754
1755 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1756 if (addr & ~HPAGE_PMD_MASK ||
1757 vma->vm_end < addr + HPAGE_PMD_SIZE)
1758 continue;
1759
1760 mm = vma->vm_mm;
1761 if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1762 continue;
1763
1764 if (hpage_collapse_test_exit(mm))
1765 continue;
1766
1767 if (!file_backed_vma_is_retractable(vma))
1768 continue;
1769
1770 /* PTEs were notified when unmapped; but now for the PMD? */
1771 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1772 addr, addr + HPAGE_PMD_SIZE);
1773 mmu_notifier_invalidate_range_start(&range);
1774
1775 pml = pmd_lock(mm, pmd);
1776 /*
1777 * The lock of new_folio is still held, so page faults on this
1778 * range will block on it, which prevents the pte entries from
1779 * being set again. So even though the old empty PTE page may be
1780 * concurrently freed and a new PTE page is filled into the pmd
1781 * entry, it is still empty and can be removed.
1782 *
1783 * So here we only need to recheck if the state of pmd entry
1784 * still meets our requirements, rather than checking pmd_same()
1785 * like elsewhere.
1786 */
1787 if (check_pmd_state(pmd) != SCAN_SUCCEED)
1788 goto drop_pml;
1789 ptl = pte_lockptr(mm, pmd);
1790 if (ptl != pml)
1791 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1792
1793 /*
1794 * Huge page lock is still held, so normally the page table must
1795 * remain empty; and we have already skipped anon_vma and
1796 * userfaultfd_wp() vmas. But since the mmap_lock is not held,
1797 * it is still possible for a racing userfaultfd_ioctl() or
1798 * madvise() to have inserted ptes or markers. Now that we hold
1799 * ptlock, repeating the retractable checks protects us from
1800 * races against the prior checks.
1801 */
1802 if (likely(file_backed_vma_is_retractable(vma))) {
1803 pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
1804 pmdp_get_lockless_sync();
1805 success = true;
1806 }
1807
1808 if (ptl != pml)
1809 spin_unlock(ptl);
1810 drop_pml:
1811 spin_unlock(pml);
1812
1813 mmu_notifier_invalidate_range_end(&range);
1814
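/*
 * The PTE table detached above is freed via RCU, so lockless walkers
 * such as GUP-fast that may still hold a reference to it remain safe.
 */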
1815 if (success) {
1816 mm_dec_nr_ptes(mm);
1817 page_table_check_pte_clear_range(mm, addr, pgt_pmd);
1818 pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1819 }
1820 }
1821 i_mmap_unlock_read(mapping);
1822 }
1823
1824 /**
1825 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1826 *
1827 * @mm: process address space where collapse happens
1828 * @addr: virtual collapse start address
1829 * @file: file that the collapse operates on
1830 * @start: collapse start page index in the page cache
1831 * @cc: collapse context and scratchpad
1832 *
1833 * Basic scheme is simple, details are more complex:
1834 * - allocate and lock a new huge page;
1835 * - scan page cache, locking old pages
1836 * + swap/gup in pages if necessary;
1837 * - copy data to new page
1838 * - handle shmem holes
1839 * + re-validate that holes weren't filled by someone else
1840 * + check for userfaultfd
1841 * - finalize updates to the page cache;
1842 * - if replacing succeeds:
1843 * + unlock huge page;
1844 * + free old pages;
1845 * - if replacing failed:
1846 * + unlock old pages
1847 * + unlock and free huge page;
1848 */
1849 static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
1850 struct file *file, pgoff_t start, struct collapse_control *cc)
1851 {
1852 struct address_space *mapping = file->f_mapping;
1853 struct page *dst;
1854 struct folio *folio, *tmp, *new_folio;
1855 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1856 LIST_HEAD(pagelist);
1857 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1858 enum scan_result result = SCAN_SUCCEED;
1859 int nr_none = 0;
1860 bool is_shmem = shmem_file(file);
1861
1862 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1863 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1864
1865 result = alloc_charge_folio(&new_folio, mm, cc);
1866 if (result != SCAN_SUCCEED)
1867 goto out;
1868
1869 mapping_set_update(&xas, mapping);
1870
1871 __folio_set_locked(new_folio);
1872 if (is_shmem)
1873 __folio_set_swapbacked(new_folio);
1874 new_folio->index = start;
1875 new_folio->mapping = mapping;
1876
1877 /*
1878 * Ensure we have slots for all the pages in the range. This is
1879 * almost certainly a no-op because most of the pages must be present.
1880 */
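/*
 * On -ENOMEM from xas_create_range(), xas_nomem() preallocates the
 * missing XArray node with GFP_KERNEL outside the lock and the loop
 * retries; any other error aborts the collapse.
 */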
1881 do {
1882 xas_lock_irq(&xas);
1883 xas_create_range(&xas);
1884 if (!xas_error(&xas))
1885 break;
1886 xas_unlock_irq(&xas);
1887 if (!xas_nomem(&xas, GFP_KERNEL)) {
1888 result = SCAN_FAIL;
1889 goto rollback;
1890 }
1891 } while (1);
1892
1893 for (index = start; index < end;) {
1894 xas_set(&xas, index);
1895 folio = xas_load(&xas);
1896
1897 VM_BUG_ON(index != xas.xa_index);
1898 if (is_shmem) {
1899 if (!folio) {
1900 /*
1901 * Stop if extent has been truncated or
1902 * hole-punched, and is now completely
1903 * empty.
1904 */
1905 if (index == start) {
1906 if (!xas_next_entry(&xas, end - 1)) {
1907 result = SCAN_TRUNCATED;
1908 goto xa_locked;
1909 }
1910 }
1911 nr_none++;
1912 index++;
1913 continue;
1914 }
1915
1916 if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
1917 xas_unlock_irq(&xas);
1918 /* swap in or instantiate fallocated page */
1919 if (shmem_get_folio(mapping->host, index, 0,
1920 &folio, SGP_NOALLOC)) {
1921 result = SCAN_FAIL;
1922 goto xa_unlocked;
1923 }
1924 /* drain lru cache to help folio_isolate_lru() */
1925 lru_add_drain();
1926 } else if (folio_trylock(folio)) {
1927 folio_get(folio);
1928 xas_unlock_irq(&xas);
1929 } else {
1930 result = SCAN_PAGE_LOCK;
1931 goto xa_locked;
1932 }
1933 } else { /* !is_shmem */
1934 if (!folio || xa_is_value(folio)) {
1935 xas_unlock_irq(&xas);
1936 page_cache_sync_readahead(mapping, &file->f_ra,
1937 file, index,
1938 end - index);
1939 /* drain lru cache to help folio_isolate_lru() */
1940 lru_add_drain();
1941 folio = filemap_lock_folio(mapping, index);
1942 if (IS_ERR(folio)) {
1943 result = SCAN_FAIL;
1944 goto xa_unlocked;
1945 }
1946 } else if (folio_test_dirty(folio)) {
1947 /*
1948 * khugepaged only works on read-only fd,
1949 * so this page is dirty because it hasn't
1950 * been flushed since first write. There
1951 * won't be new dirty pages.
1952 *
1953 * Trigger async flush here and hope the
1954 * writeback is done when khugepaged
1955 * revisits this page.
1956 *
1957 * This is a one-off situation. We are not
1958 * forcing writeback in a loop.
1959 */
1960 xas_unlock_irq(&xas);
1961 filemap_flush(mapping);
1962 result = SCAN_PAGE_DIRTY_OR_WRITEBACK;
1963 goto xa_unlocked;
1964 } else if (folio_test_writeback(folio)) {
1965 xas_unlock_irq(&xas);
1966 result = SCAN_PAGE_DIRTY_OR_WRITEBACK;
1967 goto xa_unlocked;
1968 } else if (folio_trylock(folio)) {
1969 folio_get(folio);
1970 xas_unlock_irq(&xas);
1971 } else {
1972 result = SCAN_PAGE_LOCK;
1973 goto xa_locked;
1974 }
1975 }
1976
1977 /*
1978 * The folio must be locked, so we can drop the i_pages lock
1979 * without racing with truncate.
1980 */
1981 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1982
1983 /* make sure the folio is up to date */
1984 if (unlikely(!folio_test_uptodate(folio))) {
1985 result = SCAN_FAIL;
1986 goto out_unlock;
1987 }
1988
1989 /*
1990 * If file was truncated then extended, or hole-punched, before
1991 * we locked the first folio, then a THP might be there already.
1992 * This will be discovered on the first iteration.
1993 */
1994 if (folio_order(folio) == HPAGE_PMD_ORDER &&
1995 folio->index == start) {
1996 /* Maybe PMD-mapped */
1997 result = SCAN_PTE_MAPPED_HUGEPAGE;
1998 goto out_unlock;
1999 }
2000
2001 if (folio_mapping(folio) != mapping) {
2002 result = SCAN_TRUNCATED;
2003 goto out_unlock;
2004 }
2005
2006 if (!is_shmem && (folio_test_dirty(folio) ||
2007 folio_test_writeback(folio))) {
2008 /*
2009 * khugepaged only works on read-only fd, so this
2010 * folio is dirty because it hasn't been flushed
2011 * since first write.
2012 */
2013 result = SCAN_PAGE_DIRTY_OR_WRITEBACK;
2014 goto out_unlock;
2015 }
2016
2017 if (!folio_isolate_lru(folio)) {
2018 result = SCAN_DEL_PAGE_LRU;
2019 goto out_unlock;
2020 }
2021
2022 if (!filemap_release_folio(folio, GFP_KERNEL)) {
2023 result = SCAN_PAGE_HAS_PRIVATE;
2024 folio_putback_lru(folio);
2025 goto out_unlock;
2026 }
2027
2028 if (folio_mapped(folio))
2029 try_to_unmap(folio,
2030 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
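/*
 * TTU_BATCH_FLUSH defers the TLB flush; it is issued by
 * try_to_unmap_flush() once the scan of the range is done.
 */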
2031
2032 xas_lock_irq(&xas);
2033
2034 VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
2035
2036 /*
2037 * We control 2 + nr_pages references to the folio:
2038 * - we hold a pin on it;
2039 * - nr_pages references from the page cache;
2040 * - one from folio_isolate_lru();
2041 * If those are the only references, then any new usage
2042 * of the folio will have to fetch it from the page
2043 * cache. That requires locking the folio to handle
2044 * truncate, so any new usage will be blocked until we
2045 * unlock folio after collapse/during rollback.
2046 */
2047 if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) {
2048 result = SCAN_PAGE_COUNT;
2049 xas_unlock_irq(&xas);
2050 folio_putback_lru(folio);
2051 goto out_unlock;
2052 }
2053
2054 /*
2055 * Accumulate the folios that are being collapsed.
2056 */
2057 list_add_tail(&folio->lru, &pagelist);
2058 index += folio_nr_pages(folio);
2059 continue;
2060 out_unlock:
2061 folio_unlock(folio);
2062 folio_put(folio);
2063 goto xa_unlocked;
2064 }
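/*
 * At this point every present folio in the range is locked, isolated
 * from the LRU and queued on pagelist; holes (shmem only) have been
 * counted in nr_none.
 */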
2065
2066 if (!is_shmem) {
2067 filemap_nr_thps_inc(mapping);
2068 /*
2069 * Paired with the fence in do_dentry_open() -> get_write_access()
2070 * to ensure i_writecount is up to date and the update to nr_thps
2071 * is visible. Ensures the page cache will be truncated if the
2072 * file is opened writable.
2073 */
2074 smp_mb();
2075 if (inode_is_open_for_write(mapping->host)) {
2076 result = SCAN_FAIL;
2077 filemap_nr_thps_dec(mapping);
2078 }
2079 }
2080
2081 xa_locked:
2082 xas_unlock_irq(&xas);
2083 xa_unlocked:
2084
2085 /*
2086 * If collapse is successful, flush must be done now before copying.
2087 * If collapse is unsuccessful, the flush may not strictly be needed,
2088 * but do it anyway to clear the batched TLB state.
2089 */
2090 try_to_unmap_flush();
2091
2092 if (result == SCAN_SUCCEED && nr_none &&
2093 !shmem_charge(mapping->host, nr_none))
2094 result = SCAN_FAIL;
2095 if (result != SCAN_SUCCEED) {
2096 nr_none = 0;
2097 goto rollback;
2098 }
2099
2100 /*
2101 * The old folios are locked, so they won't change anymore.
2102 */
2103 index = start;
2104 dst = folio_page(new_folio, 0);
2105 list_for_each_entry(folio, &pagelist, lru) {
2106 int i, nr_pages = folio_nr_pages(folio);
2107
2108 while (index < folio->index) {
2109 clear_highpage(dst);
2110 index++;
2111 dst++;
2112 }
2113
2114 for (i = 0; i < nr_pages; i++) {
2115 if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) {
2116 result = SCAN_COPY_MC;
2117 goto rollback;
2118 }
2119 index++;
2120 dst++;
2121 }
2122 }
2123 while (index < end) {
2124 clear_highpage(dst);
2125 index++;
2126 dst++;
2127 }
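/*
 * new_folio now holds a complete copy of the range, with holes
 * zero-filled; nothing has been published to the page cache yet.
 */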
2128
2129 if (nr_none) {
2130 struct vm_area_struct *vma;
2131 int nr_none_check = 0;
2132
2133 i_mmap_lock_read(mapping);
2134 xas_lock_irq(&xas);
2135
2136 xas_set(&xas, start);
2137 for (index = start; index < end; index++) {
2138 if (!xas_next(&xas)) {
2139 xas_store(&xas, XA_RETRY_ENTRY);
2140 if (xas_error(&xas)) {
2141 result = SCAN_STORE_FAILED;
2142 goto immap_locked;
2143 }
2144 nr_none_check++;
2145 }
2146 }
2147
2148 if (nr_none != nr_none_check) {
2149 result = SCAN_PAGE_FILLED;
2150 goto immap_locked;
2151 }
2152
2153 /*
2154 * If userspace observed a missing page in a VMA with
2155 * a MODE_MISSING userfaultfd, then it might expect a
2156 * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
2157 * roll back to avoid suppressing such an event. wp/minor
2158 * userfaultfds don't give userspace any guarantees that
2159 * the kernel won't fill a missing page with a zero page,
2160 * so they don't matter here.
2161 *
2162 * Any userfaultfds registered after this point will
2163 * not be able to observe any missing pages due to the
2164 * previously inserted retry entries.
2165 */
2166 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2167 if (userfaultfd_missing(vma)) {
2168 result = SCAN_EXCEED_NONE_PTE;
2169 goto immap_locked;
2170 }
2171 }
2172
2173 immap_locked:
2174 i_mmap_unlock_read(mapping);
2175 if (result != SCAN_SUCCEED) {
2176 xas_set(&xas, start);
2177 for (index = start; index < end; index++) {
2178 if (xas_next(&xas) == XA_RETRY_ENTRY)
2179 xas_store(&xas, NULL);
2180 }
2181
2182 xas_unlock_irq(&xas);
2183 goto rollback;
2184 }
2185 } else {
2186 xas_lock_irq(&xas);
2187 }
2188
2189 if (is_shmem) {
2190 lruvec_stat_mod_folio(new_folio, NR_SHMEM, HPAGE_PMD_NR);
2191 lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
2192 } else {
2193 lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
2194 }
2195 lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, HPAGE_PMD_NR);
2196
2197 /*
2198 * Mark new_folio as uptodate before inserting it into the
2199 * page cache so that it isn't mistaken for a fallocated but
2200 * unwritten page.
2201 */
2202 folio_mark_uptodate(new_folio);
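/*
 * The page cache holds one reference per subpage of a multi-index
 * entry; our allocation reference supplies the first, so add the
 * remaining HPAGE_PMD_NR - 1 here.
 */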
2203 folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
2204
2205 if (is_shmem)
2206 folio_mark_dirty(new_folio);
2207 folio_add_lru(new_folio);
2208
2209 /* Join all the small entries into a single multi-index entry. */
2210 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2211 xas_store(&xas, new_folio);
2212 WARN_ON_ONCE(xas_error(&xas));
2213 xas_unlock_irq(&xas);
2214
2215 /*
2216 * Remove pte page tables, so we can re-fault the page as huge.
2217 * If MADV_COLLAPSE, adjust result to call try_collapse_pte_mapped_thp().
2218 */
2219 retract_page_tables(mapping, start);
2220 if (cc && !cc->is_khugepaged)
2221 result = SCAN_PTE_MAPPED_HUGEPAGE;
2222 folio_unlock(new_folio);
2223
2224 /*
2225 * The collapse has succeeded, so free the old folios.
2226 */
2227 list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2228 list_del(&folio->lru);
2229 lruvec_stat_mod_folio(folio, NR_FILE_PAGES,
2230 -folio_nr_pages(folio));
2231 if (is_shmem)
2232 lruvec_stat_mod_folio(folio, NR_SHMEM,
2233 -folio_nr_pages(folio));
2234 folio->mapping = NULL;
2235 folio_clear_active(folio);
2236 folio_clear_unevictable(folio);
2237 folio_unlock(folio);
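/*
 * Drop our pin, the LRU-isolation reference, and the nr_pages
 * references the page cache held on the old folio.
 */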
2238 folio_put_refs(folio, 2 + folio_nr_pages(folio));
2239 }
2240
2241 goto out;
2242
2243 rollback:
2244 /* Something went wrong: roll back page cache changes */
2245 if (nr_none) {
2246 xas_lock_irq(&xas);
2247 mapping->nrpages -= nr_none;
2248 xas_unlock_irq(&xas);
2249 shmem_uncharge(mapping->host, nr_none);
2250 }
2251
2252 list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2253 list_del(&folio->lru);
2254 folio_unlock(folio);
2255 folio_putback_lru(folio);
2256 folio_put(folio);
2257 }
2258 /*
2259 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
2260 * file only. This undo is not needed unless failure is
2261 * due to SCAN_COPY_MC.
2262 */
2263 if (!is_shmem && result == SCAN_COPY_MC) {
2264 filemap_nr_thps_dec(mapping);
2265 /*
2266 * Paired with the fence in do_dentry_open() -> get_write_access()
2267 * to ensure the update to nr_thps is visible.
2268 */
2269 smp_mb();
2270 }
2271
2272 new_folio->mapping = NULL;
2273
2274 folio_unlock(new_folio);
2275 folio_put(new_folio);
2276 out:
2277 VM_BUG_ON(!list_empty(&pagelist));
2278 trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
2279 return result;
2280 }
2281
2282 static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2283 struct file *file, pgoff_t start, struct collapse_control *cc)
2284 {
2285 struct folio *folio = NULL;
2286 struct address_space *mapping = file->f_mapping;
2287 XA_STATE(xas, &mapping->i_pages, start);
2288 int present, swap;
2289 int node = NUMA_NO_NODE;
2290 enum scan_result result = SCAN_SUCCEED;
2291
2292 present = 0;
2293 swap = 0;
2294 memset(cc->node_load, 0, sizeof(cc->node_load));
2295 nodes_clear(cc->alloc_nmask);
2296 rcu_read_lock();
2297 xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
2298 if (xas_retry(&xas, folio))
2299 continue;
2300
2301 if (xa_is_value(folio)) {
2302 swap += 1 << xas_get_order(&xas);
2303 if (cc->is_khugepaged &&
2304 swap > khugepaged_max_ptes_swap) {
2305 result = SCAN_EXCEED_SWAP_PTE;
2306 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2307 break;
2308 }
2309 continue;
2310 }
2311
2312 if (!folio_try_get(folio)) {
2313 xas_reset(&xas);
2314 continue;
2315 }
2316
2317 if (unlikely(folio != xas_reload(&xas))) {
2318 folio_put(folio);
2319 xas_reset(&xas);
2320 continue;
2321 }
2322
2323 if (folio_order(folio) == HPAGE_PMD_ORDER &&
2324 folio->index == start) {
2325 /* Maybe PMD-mapped */
2326 result = SCAN_PTE_MAPPED_HUGEPAGE;
2327 /*
2328 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2329 * by the caller won't touch the page cache, and so
2330 * it's safe to skip LRU and refcount checks before
2331 * returning.
2332 */
2333 folio_put(folio);
2334 break;
2335 }
2336
2337 node = folio_nid(folio);
2338 if (hpage_collapse_scan_abort(node, cc)) {
2339 result = SCAN_SCAN_ABORT;
2340 folio_put(folio);
2341 break;
2342 }
2343 cc->node_load[node]++;
2344
2345 if (!folio_test_lru(folio)) {
2346 result = SCAN_PAGE_LRU;
2347 folio_put(folio);
2348 break;
2349 }
2350
2351 if (folio_expected_ref_count(folio) + 1 != folio_ref_count(folio)) {
2352 result = SCAN_PAGE_COUNT;
2353 folio_put(folio);
2354 break;
2355 }
2356
2357 /*
2358 * We probably should check if the folio is referenced
2359 * here, but nobody would transfer pte_young() to
2360 * folio_test_referenced() for us. And rmap walk here
2361 * is just too costly...
2362 */
2363
2364 present += folio_nr_pages(folio);
2365 folio_put(folio);
2366
2367 if (need_resched()) {
2368 xas_pause(&xas);
2369 cond_resched_rcu();
2370 }
2371 }
2372 rcu_read_unlock();
2373
2374 if (result == SCAN_SUCCEED) {
2375 if (cc->is_khugepaged &&
2376 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2377 result = SCAN_EXCEED_NONE_PTE;
2378 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2379 } else {
2380 result = collapse_file(mm, addr, file, start, cc);
2381 }
2382 }
2383
2384 trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
2385 return result;
2386 }
2387
2388 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result *result,
2389 struct collapse_control *cc)
2390 __releases(&khugepaged_mm_lock)
2391 __acquires(&khugepaged_mm_lock)
2392 {
2393 struct vma_iterator vmi;
2394 struct mm_slot *slot;
2395 struct mm_struct *mm;
2396 struct vm_area_struct *vma;
2397 int progress = 0;
2398
2399 VM_BUG_ON(!pages);
2400 lockdep_assert_held(&khugepaged_mm_lock);
2401 *result = SCAN_FAIL;
2402
2403 if (khugepaged_scan.mm_slot) {
2404 slot = khugepaged_scan.mm_slot;
2405 } else {
2406 slot = list_first_entry(&khugepaged_scan.mm_head,
2407 struct mm_slot, mm_node);
2408 khugepaged_scan.address = 0;
2409 khugepaged_scan.mm_slot = slot;
2410 }
2411 spin_unlock(&khugepaged_mm_lock);
2412
2413 mm = slot->mm;
2414 /*
2415 * Don't wait for the mmap_lock (to avoid long wait times). Just move to
2416 * the next mm on the list.
2417 */
2418 vma = NULL;
2419 if (unlikely(!mmap_read_trylock(mm)))
2420 goto breakouterloop_mmap_lock;
2421
2422 progress++;
2423 if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2424 goto breakouterloop;
2425
2426 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2427 for_each_vma(vmi, vma) {
2428 unsigned long hstart, hend;
2429
2430 cond_resched();
2431 if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
2432 progress++;
2433 break;
2434 }
2435 if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_KHUGEPAGED, PMD_ORDER)) {
2436 progress++;
2437 continue;
2438 }
2439 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2440 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2441 if (khugepaged_scan.address > hend) {
2442 progress++;
2443 continue;
2444 }
2445 if (khugepaged_scan.address < hstart)
2446 khugepaged_scan.address = hstart;
2447 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2448
2449 while (khugepaged_scan.address < hend) {
2450 bool mmap_locked = true;
2451
2452 cond_resched();
2453 if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2454 goto breakouterloop;
2455
2456 VM_BUG_ON(khugepaged_scan.address < hstart ||
2457 khugepaged_scan.address + HPAGE_PMD_SIZE >
2458 hend);
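/*
 * File-backed memory is collapsed through the page cache, which
 * does not require the mmap_lock: pin the file, drop the lock,
 * then scan.
 */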
2459 if (!vma_is_anonymous(vma)) {
2460 struct file *file = get_file(vma->vm_file);
2461 pgoff_t pgoff = linear_page_index(vma,
2462 khugepaged_scan.address);
2463
2464 mmap_read_unlock(mm);
2465 mmap_locked = false;
2466 *result = hpage_collapse_scan_file(mm,
2467 khugepaged_scan.address, file, pgoff, cc);
2468 fput(file);
2469 if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
2470 mmap_read_lock(mm);
2471 if (hpage_collapse_test_exit_or_disable(mm))
2472 goto breakouterloop;
2473 *result = try_collapse_pte_mapped_thp(mm,
2474 khugepaged_scan.address, false);
2475 if (*result == SCAN_PMD_MAPPED)
2476 *result = SCAN_SUCCEED;
2477 mmap_read_unlock(mm);
2478 }
2479 } else {
2480 *result = hpage_collapse_scan_pmd(mm, vma,
2481 khugepaged_scan.address, &mmap_locked, cc);
2482 }
2483
2484 if (*result == SCAN_SUCCEED)
2485 ++khugepaged_pages_collapsed;
2486
2487 /* move to next address */
2488 khugepaged_scan.address += HPAGE_PMD_SIZE;
2489 progress += HPAGE_PMD_NR;
2490 if (!mmap_locked)
2491 /*
2492 * We released mmap_lock so break loop. Note
2493 * that we drop mmap_lock before all hugepage
2494 * allocations, so if allocation fails, we are
2495 * guaranteed to break here and report the
2496 * correct result back to caller.
2497 */
2498 goto breakouterloop_mmap_lock;
2499 if (progress >= pages)
2500 goto breakouterloop;
2501 }
2502 }
2503 breakouterloop:
2504 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2505 breakouterloop_mmap_lock:
2506
2507 spin_lock(&khugepaged_mm_lock);
2508 VM_BUG_ON(khugepaged_scan.mm_slot != slot);
2509 /*
2510 * Release the current mm_slot if this mm is about to die, or
2511 * if we scanned all vmas of this mm.
2512 */
2513 if (hpage_collapse_test_exit(mm) || !vma) {
2514 /*
2515 * Make sure that if mm_users is reaching zero while
2516 * khugepaged runs here, khugepaged_exit will find
2517 * mm_slot not pointing to the exiting mm.
2518 */
2519 if (!list_is_last(&slot->mm_node, &khugepaged_scan.mm_head)) {
2520 khugepaged_scan.mm_slot = list_next_entry(slot, mm_node);
2521 khugepaged_scan.address = 0;
2522 } else {
2523 khugepaged_scan.mm_slot = NULL;
2524 khugepaged_full_scans++;
2525 }
2526
2527 collect_mm_slot(slot);
2528 }
2529
2530 return progress;
2531 }
2532
2533 static int khugepaged_has_work(void)
2534 {
2535 return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
2536 }
2537
2538 static int khugepaged_wait_event(void)
2539 {
2540 return !list_empty(&khugepaged_scan.mm_head) ||
2541 kthread_should_stop();
2542 }
2543
2544 static void khugepaged_do_scan(struct collapse_control *cc)
2545 {
2546 unsigned int progress = 0, pass_through_head = 0;
2547 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2548 bool wait = true;
2549 enum scan_result result = SCAN_SUCCEED;
2550
2551 lru_add_drain_all();
2552
2553 while (true) {
2554 cond_resched();
2555
2556 if (unlikely(kthread_should_stop()))
2557 break;
2558
2559 spin_lock(&khugepaged_mm_lock);
2560 if (!khugepaged_scan.mm_slot)
2561 pass_through_head++;
2562 if (khugepaged_has_work() &&
2563 pass_through_head < 2)
2564 progress += khugepaged_scan_mm_slot(pages - progress,
2565 &result, cc);
2566 else
2567 progress = pages;
2568 spin_unlock(&khugepaged_mm_lock);
2569
2570 if (progress >= pages)
2571 break;
2572
2573 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2574 /*
2575 * If we fail to allocate the first time, sleep for
2576 * a while. If we fail again, abort the scan.
2577 */
2578 if (!wait)
2579 break;
2580 wait = false;
2581 khugepaged_alloc_sleep();
2582 }
2583 }
2584 }
2585
2586 static bool khugepaged_should_wakeup(void)
2587 {
2588 return kthread_should_stop() ||
2589 time_after_eq(jiffies, khugepaged_sleep_expire);
2590 }
2591
2592 static void khugepaged_wait_work(void)
2593 {
2594 if (khugepaged_has_work()) {
2595 const unsigned long scan_sleep_jiffies =
2596 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2597
2598 if (!scan_sleep_jiffies)
2599 return;
2600
2601 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2602 wait_event_freezable_timeout(khugepaged_wait,
2603 khugepaged_should_wakeup(),
2604 scan_sleep_jiffies);
2605 return;
2606 }
2607
2608 if (hugepage_pmd_enabled())
2609 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2610 }
2611
2612 static int khugepaged(void *none)
2613 {
2614 struct mm_slot *slot;
2615
2616 set_freezable();
2617 set_user_nice(current, MAX_NICE);
2618
2619 while (!kthread_should_stop()) {
2620 khugepaged_do_scan(&khugepaged_collapse_control);
2621 khugepaged_wait_work();
2622 }
2623
2624 spin_lock(&khugepaged_mm_lock);
2625 slot = khugepaged_scan.mm_slot;
2626 khugepaged_scan.mm_slot = NULL;
2627 if (slot)
2628 collect_mm_slot(slot);
2629 spin_unlock(&khugepaged_mm_lock);
2630 return 0;
2631 }
2632
2633 static void set_recommended_min_free_kbytes(void)
2634 {
2635 struct zone *zone;
2636 int nr_zones = 0;
2637 unsigned long recommended_min;
2638
2639 if (!hugepage_pmd_enabled()) {
2640 calculate_min_free_kbytes();
2641 goto update_wmarks;
2642 }
2643
2644 for_each_populated_zone(zone) {
2645 /*
2646 * We don't need to worry about fragmentation of
2647 * ZONE_MOVABLE since it only has movable pages.
2648 */
2649 if (zone_idx(zone) > gfp_zone(GFP_USER))
2650 continue;
2651
2652 nr_zones++;
2653 }
2654
2655 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2656 recommended_min = pageblock_nr_pages * nr_zones * 2;
2657
2658 /*
2659 * Make sure that on average at least two pageblocks are almost free
2660 * of another type, one for a migratetype to fall back to and a
2661 * second to avoid subsequent fallbacks of other types. There are 3
2662 * MIGRATE_TYPES we care about.
2663 */
2664 recommended_min += pageblock_nr_pages * nr_zones *
2665 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2666
2667 /* never allow reserving more than 5% of lowmem */
2668 recommended_min = min(recommended_min,
2669 (unsigned long) nr_free_buffer_pages() / 20);
2670 recommended_min <<= (PAGE_SHIFT-10);
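/*
 * Illustration (values assumed, not taken from this code): with 4K
 * pages, 512-page pageblocks and two eligible zones, the sums above
 * give 512 * 2 * 2 + 512 * 2 * 3 * 3 = 11264 pages; the shift by
 * PAGE_SHIFT - 10 converts pages to KB, i.e. ~44 MB, subject to the
 * 5%-of-lowmem cap applied above.
 */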
2671
2672 if (recommended_min > min_free_kbytes) {
2673 if (user_min_free_kbytes >= 0)
2674 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2675 min_free_kbytes, recommended_min);
2676
2677 min_free_kbytes = recommended_min;
2678 }
2679
2680 update_wmarks:
2681 setup_per_zone_wmarks();
2682 }
2683
2684 int start_stop_khugepaged(void)
2685 {
2686 int err = 0;
2687
2688 mutex_lock(&khugepaged_mutex);
2689 if (hugepage_pmd_enabled()) {
2690 if (!khugepaged_thread)
2691 khugepaged_thread = kthread_run(khugepaged, NULL,
2692 "khugepaged");
2693 if (IS_ERR(khugepaged_thread)) {
2694 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2695 err = PTR_ERR(khugepaged_thread);
2696 khugepaged_thread = NULL;
2697 goto fail;
2698 }
2699
2700 if (!list_empty(&khugepaged_scan.mm_head))
2701 wake_up_interruptible(&khugepaged_wait);
2702 } else if (khugepaged_thread) {
2703 kthread_stop(khugepaged_thread);
2704 khugepaged_thread = NULL;
2705 }
2706 set_recommended_min_free_kbytes();
2707 fail:
2708 mutex_unlock(&khugepaged_mutex);
2709 return err;
2710 }
2711
2712 void khugepaged_min_free_kbytes_update(void)
2713 {
2714 mutex_lock(&khugepaged_mutex);
2715 if (hugepage_pmd_enabled() && khugepaged_thread)
2716 set_recommended_min_free_kbytes();
2717 mutex_unlock(&khugepaged_mutex);
2718 }
2719
2720 bool current_is_khugepaged(void)
2721 {
2722 return kthread_func(current) == khugepaged;
2723 }
2724
2725 static int madvise_collapse_errno(enum scan_result r)
2726 {
2727 /*
2728 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2729 * actionable feedback to caller, so they may take an appropriate
2730 * fallback measure depending on the nature of the failure.
2731 */
2732 switch (r) {
2733 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2734 return -ENOMEM;
2735 case SCAN_CGROUP_CHARGE_FAIL:
2736 case SCAN_EXCEED_NONE_PTE:
2737 return -EBUSY;
2738 /* Resource temporarily unavailable - trying again might succeed */
2739 case SCAN_PAGE_COUNT:
2740 case SCAN_PAGE_LOCK:
2741 case SCAN_PAGE_LRU:
2742 case SCAN_DEL_PAGE_LRU:
2743 case SCAN_PAGE_FILLED:
2744 case SCAN_PAGE_DIRTY_OR_WRITEBACK:
2745 return -EAGAIN;
2746 /*
2747 * Other: trying again is unlikely to succeed, or the error is intrinsic
2748 * to the specified memory range. khugepaged likely won't be able to collapse
2749 * either.
2750 */
2751 default:
2752 return -EINVAL;
2753 }
2754 }
2755
2756 int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
2757 unsigned long end, bool *lock_dropped)
2758 {
2759 struct collapse_control *cc;
2760 struct mm_struct *mm = vma->vm_mm;
2761 unsigned long hstart, hend, addr;
2762 enum scan_result last_fail = SCAN_FAIL;
2763 int thps = 0;
2764 bool mmap_locked = true;
2765
2766 BUG_ON(vma->vm_start > start);
2767 BUG_ON(vma->vm_end < end);
2768
2769 if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, PMD_ORDER))
2770 return -EINVAL;
2771
2772 cc = kmalloc_obj(*cc);
2773 if (!cc)
2774 return -ENOMEM;
2775 cc->is_khugepaged = false;
2776
2777 mmgrab(mm);
2778 lru_add_drain_all();
2779
2780 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2781 hend = end & HPAGE_PMD_MASK;
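/*
 * Only PMD-aligned, PMD-sized spans within [start, end) are
 * collapsed: round start up and end down to huge page boundaries.
 */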
2782
2783 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2784 enum scan_result result = SCAN_FAIL;
2785 bool triggered_wb = false;
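/*
 * triggered_wb limits us to one synchronous writeback-and-retry
 * attempt per PMD range when the scan hits dirty or writeback
 * pages.
 */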
2786
2787 retry:
2788 if (!mmap_locked) {
2789 cond_resched();
2790 mmap_read_lock(mm);
2791 mmap_locked = true;
2792 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2793 cc);
2794 if (result != SCAN_SUCCEED) {
2795 last_fail = result;
2796 goto out_nolock;
2797 }
2798
2799 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2800 }
2801 mmap_assert_locked(mm);
2802 if (!vma_is_anonymous(vma)) {
2803 struct file *file = get_file(vma->vm_file);
2804 pgoff_t pgoff = linear_page_index(vma, addr);
2805
2806 mmap_read_unlock(mm);
2807 mmap_locked = false;
2808 *lock_dropped = true;
2809 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2810 cc);
2811
2812 if (result == SCAN_PAGE_DIRTY_OR_WRITEBACK && !triggered_wb &&
2813 mapping_can_writeback(file->f_mapping)) {
2814 loff_t lstart = (loff_t)pgoff << PAGE_SHIFT;
2815 loff_t lend = lstart + HPAGE_PMD_SIZE - 1;
2816
2817 filemap_write_and_wait_range(file->f_mapping, lstart, lend);
2818 triggered_wb = true;
2819 fput(file);
2820 goto retry;
2821 }
2822 fput(file);
2823 } else {
2824 result = hpage_collapse_scan_pmd(mm, vma, addr,
2825 &mmap_locked, cc);
2826 }
2827 if (!mmap_locked)
2828 *lock_dropped = true;
2829
2830 handle_result:
2831 switch (result) {
2832 case SCAN_SUCCEED:
2833 case SCAN_PMD_MAPPED:
2834 ++thps;
2835 break;
2836 case SCAN_PTE_MAPPED_HUGEPAGE:
2837 BUG_ON(mmap_locked);
2838 mmap_read_lock(mm);
2839 result = try_collapse_pte_mapped_thp(mm, addr, true);
2840 mmap_read_unlock(mm);
2841 goto handle_result;
2842 /* Whitelisted set of results where continuing is OK */
2843 case SCAN_NO_PTE_TABLE:
2844 case SCAN_PTE_NON_PRESENT:
2845 case SCAN_PTE_UFFD_WP:
2846 case SCAN_LACK_REFERENCED_PAGE:
2847 case SCAN_PAGE_NULL:
2848 case SCAN_PAGE_COUNT:
2849 case SCAN_PAGE_LOCK:
2850 case SCAN_PAGE_COMPOUND:
2851 case SCAN_PAGE_LRU:
2852 case SCAN_DEL_PAGE_LRU:
2853 last_fail = result;
2854 break;
2855 default:
2856 last_fail = result;
2857 /* Other error, exit */
2858 goto out_maybelock;
2859 }
2860 }
2861
2862 out_maybelock:
2863 /* Caller expects us to hold mmap_lock on return */
2864 if (!mmap_locked)
2865 mmap_read_lock(mm);
2866 out_nolock:
2867 mmap_assert_locked(mm);
2868 mmdrop(mm);
2869 kfree(cc);
2870
2871 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2872 : madvise_collapse_errno(last_fail);
2873 }
2874