xref: /linux/mm/khugepaged.c (revision 07f0148aafe8c95a3a76cd59e9e75b4d78d1d31d)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/page_table_check.h>
20 #include <linux/swapops.h>
21 #include <linux/shmem_fs.h>
22 
23 #include <asm/tlb.h>
24 #include <asm/pgalloc.h>
25 #include "internal.h"
26 #include "mm_slot.h"
27 
28 enum scan_result {
29 	SCAN_FAIL,
30 	SCAN_SUCCEED,
31 	SCAN_PMD_NULL,
32 	SCAN_PMD_NONE,
33 	SCAN_PMD_MAPPED,
34 	SCAN_EXCEED_NONE_PTE,
35 	SCAN_EXCEED_SWAP_PTE,
36 	SCAN_EXCEED_SHARED_PTE,
37 	SCAN_PTE_NON_PRESENT,
38 	SCAN_PTE_UFFD_WP,
39 	SCAN_PTE_MAPPED_HUGEPAGE,
40 	SCAN_PAGE_RO,
41 	SCAN_LACK_REFERENCED_PAGE,
42 	SCAN_PAGE_NULL,
43 	SCAN_SCAN_ABORT,
44 	SCAN_PAGE_COUNT,
45 	SCAN_PAGE_LRU,
46 	SCAN_PAGE_LOCK,
47 	SCAN_PAGE_ANON,
48 	SCAN_PAGE_COMPOUND,
49 	SCAN_ANY_PROCESS,
50 	SCAN_VMA_NULL,
51 	SCAN_VMA_CHECK,
52 	SCAN_ADDRESS_RANGE,
53 	SCAN_DEL_PAGE_LRU,
54 	SCAN_ALLOC_HUGE_PAGE_FAIL,
55 	SCAN_CGROUP_CHARGE_FAIL,
56 	SCAN_TRUNCATED,
57 	SCAN_PAGE_HAS_PRIVATE,
58 };
59 
60 #define CREATE_TRACE_POINTS
61 #include <trace/events/huge_memory.h>
62 
63 static struct task_struct *khugepaged_thread __read_mostly;
64 static DEFINE_MUTEX(khugepaged_mutex);
65 
66 /* default: scan 8*512 PTEs (or VMAs) every 10 seconds */
67 static unsigned int khugepaged_pages_to_scan __read_mostly;
68 static unsigned int khugepaged_pages_collapsed;
69 static unsigned int khugepaged_full_scans;
70 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
71 /* during fragmentation, poll the hugepage allocator once every minute */
72 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
73 static unsigned long khugepaged_sleep_expire;
74 static DEFINE_SPINLOCK(khugepaged_mm_lock);
75 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
76 /*
77  * By default, collapse hugepages if there is at least one pte mapped
78  * the way it would have been mapped had the vma been large enough at
79  * page fault time.
80  *
81  * Note that these are only respected if collapse was initiated by khugepaged.
82  */
83 static unsigned int khugepaged_max_ptes_none __read_mostly;
84 static unsigned int khugepaged_max_ptes_swap __read_mostly;
85 static unsigned int khugepaged_max_ptes_shared __read_mostly;
86 
87 #define MM_SLOTS_HASH_BITS 10
88 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
89 
90 static struct kmem_cache *mm_slot_cache __read_mostly;
91 
92 #define MAX_PTE_MAPPED_THP 8
93 
94 struct collapse_control {
95 	bool is_khugepaged;
96 
97 	/* Num pages scanned per node */
98 	u32 node_load[MAX_NUMNODES];
99 
100 	/* nodemask for allocation fallback */
101 	nodemask_t alloc_nmask;
102 };
103 
104 /**
105  * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
106  * @slot: hash lookup from mm to mm_slot
107  * @nr_pte_mapped_thp: number of pte-mapped THPs
108  * @pte_mapped_thp: array of addresses of the pte-mapped THPs
109  */
110 struct khugepaged_mm_slot {
111 	struct mm_slot slot;
112 
113 	/* pte-mapped THP in this mm */
114 	int nr_pte_mapped_thp;
115 	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
116 };
117 
118 /**
119  * struct khugepaged_scan - cursor for scanning
120  * @mm_head: the head of the mm list to scan
121  * @mm_slot: the current mm_slot we are scanning
122  * @address: the next address inside that mm to be scanned
123  *
124  * There is only one instance of this cursor structure: khugepaged_scan.
125  */
126 struct khugepaged_scan {
127 	struct list_head mm_head;
128 	struct khugepaged_mm_slot *mm_slot;
129 	unsigned long address;
130 };
131 
132 static struct khugepaged_scan khugepaged_scan = {
133 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
134 };
135 
136 #ifdef CONFIG_SYSFS
137 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
138 					 struct kobj_attribute *attr,
139 					 char *buf)
140 {
141 	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
142 }
143 
144 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
145 					  struct kobj_attribute *attr,
146 					  const char *buf, size_t count)
147 {
148 	unsigned int msecs;
149 	int err;
150 
151 	err = kstrtouint(buf, 10, &msecs);
152 	if (err)
153 		return -EINVAL;
154 
155 	khugepaged_scan_sleep_millisecs = msecs;
156 	khugepaged_sleep_expire = 0;
157 	wake_up_interruptible(&khugepaged_wait);
158 
159 	return count;
160 }
161 static struct kobj_attribute scan_sleep_millisecs_attr =
162 	__ATTR_RW(scan_sleep_millisecs);
163 
164 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
165 					  struct kobj_attribute *attr,
166 					  char *buf)
167 {
168 	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
169 }
170 
171 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
172 					   struct kobj_attribute *attr,
173 					   const char *buf, size_t count)
174 {
175 	unsigned int msecs;
176 	int err;
177 
178 	err = kstrtouint(buf, 10, &msecs);
179 	if (err)
180 		return -EINVAL;
181 
182 	khugepaged_alloc_sleep_millisecs = msecs;
183 	khugepaged_sleep_expire = 0;
184 	wake_up_interruptible(&khugepaged_wait);
185 
186 	return count;
187 }
188 static struct kobj_attribute alloc_sleep_millisecs_attr =
189 	__ATTR_RW(alloc_sleep_millisecs);
190 
191 static ssize_t pages_to_scan_show(struct kobject *kobj,
192 				  struct kobj_attribute *attr,
193 				  char *buf)
194 {
195 	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
196 }
197 static ssize_t pages_to_scan_store(struct kobject *kobj,
198 				   struct kobj_attribute *attr,
199 				   const char *buf, size_t count)
200 {
201 	unsigned int pages;
202 	int err;
203 
204 	err = kstrtouint(buf, 10, &pages);
205 	if (err || !pages)
206 		return -EINVAL;
207 
208 	khugepaged_pages_to_scan = pages;
209 
210 	return count;
211 }
212 static struct kobj_attribute pages_to_scan_attr =
213 	__ATTR_RW(pages_to_scan);
214 
215 static ssize_t pages_collapsed_show(struct kobject *kobj,
216 				    struct kobj_attribute *attr,
217 				    char *buf)
218 {
219 	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
220 }
221 static struct kobj_attribute pages_collapsed_attr =
222 	__ATTR_RO(pages_collapsed);
223 
224 static ssize_t full_scans_show(struct kobject *kobj,
225 			       struct kobj_attribute *attr,
226 			       char *buf)
227 {
228 	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
229 }
230 static struct kobj_attribute full_scans_attr =
231 	__ATTR_RO(full_scans);
232 
233 static ssize_t defrag_show(struct kobject *kobj,
234 			   struct kobj_attribute *attr, char *buf)
235 {
236 	return single_hugepage_flag_show(kobj, attr, buf,
237 					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
238 }
239 static ssize_t defrag_store(struct kobject *kobj,
240 			    struct kobj_attribute *attr,
241 			    const char *buf, size_t count)
242 {
243 	return single_hugepage_flag_store(kobj, attr, buf, count,
244 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
245 }
246 static struct kobj_attribute khugepaged_defrag_attr =
247 	__ATTR_RW(defrag);
248 
249 /*
250  * max_ptes_none controls whether khugepaged should collapse hugepages
251  * over any unmapped ptes, in turn potentially increasing the memory
252  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
253  * reduce the available free memory in the system as it
254  * runs. Increasing max_ptes_none will instead potentially reduce the
255  * free memory in the system during the khugepaged scan.
256  */
257 static ssize_t max_ptes_none_show(struct kobject *kobj,
258 				  struct kobj_attribute *attr,
259 				  char *buf)
260 {
261 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
262 }
263 static ssize_t max_ptes_none_store(struct kobject *kobj,
264 				   struct kobj_attribute *attr,
265 				   const char *buf, size_t count)
266 {
267 	int err;
268 	unsigned long max_ptes_none;
269 
270 	err = kstrtoul(buf, 10, &max_ptes_none);
271 	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
272 		return -EINVAL;
273 
274 	khugepaged_max_ptes_none = max_ptes_none;
275 
276 	return count;
277 }
278 static struct kobj_attribute khugepaged_max_ptes_none_attr =
279 	__ATTR_RW(max_ptes_none);
280 
281 static ssize_t max_ptes_swap_show(struct kobject *kobj,
282 				  struct kobj_attribute *attr,
283 				  char *buf)
284 {
285 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
286 }
287 
288 static ssize_t max_ptes_swap_store(struct kobject *kobj,
289 				   struct kobj_attribute *attr,
290 				   const char *buf, size_t count)
291 {
292 	int err;
293 	unsigned long max_ptes_swap;
294 
295 	err = kstrtoul(buf, 10, &max_ptes_swap);
296 	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
297 		return -EINVAL;
298 
299 	khugepaged_max_ptes_swap = max_ptes_swap;
300 
301 	return count;
302 }
303 
304 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
305 	__ATTR_RW(max_ptes_swap);
306 
307 static ssize_t max_ptes_shared_show(struct kobject *kobj,
308 				    struct kobj_attribute *attr,
309 				    char *buf)
310 {
311 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
312 }
313 
314 static ssize_t max_ptes_shared_store(struct kobject *kobj,
315 				     struct kobj_attribute *attr,
316 				     const char *buf, size_t count)
317 {
318 	int err;
319 	unsigned long max_ptes_shared;
320 
321 	err = kstrtoul(buf, 10, &max_ptes_shared);
322 	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
323 		return -EINVAL;
324 
325 	khugepaged_max_ptes_shared = max_ptes_shared;
326 
327 	return count;
328 }
329 
330 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
331 	__ATTR_RW(max_ptes_shared);
332 
333 static struct attribute *khugepaged_attr[] = {
334 	&khugepaged_defrag_attr.attr,
335 	&khugepaged_max_ptes_none_attr.attr,
336 	&khugepaged_max_ptes_swap_attr.attr,
337 	&khugepaged_max_ptes_shared_attr.attr,
338 	&pages_to_scan_attr.attr,
339 	&pages_collapsed_attr.attr,
340 	&full_scans_attr.attr,
341 	&scan_sleep_millisecs_attr.attr,
342 	&alloc_sleep_millisecs_attr.attr,
343 	NULL,
344 };
345 
346 struct attribute_group khugepaged_attr_group = {
347 	.attrs = khugepaged_attr,
348 	.name = "khugepaged",
349 };
350 #endif /* CONFIG_SYSFS */
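
/*
 * Editorial note (not part of the original file): with CONFIG_SYSFS, the
 * attributes above appear under /sys/kernel/mm/transparent_hugepage/khugepaged/.
 * An illustrative tuning session from a root shell:
 *
 *   echo 100  > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *   echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */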
351 
352 int hugepage_madvise(struct vm_area_struct *vma,
353 		     unsigned long *vm_flags, int advice)
354 {
355 	switch (advice) {
356 	case MADV_HUGEPAGE:
357 #ifdef CONFIG_S390
358 		/*
359 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
360 		 * can't handle this properly after s390_enable_sie, so we simply
361 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
362 		 */
363 		if (mm_has_pgste(vma->vm_mm))
364 			return 0;
365 #endif
366 		*vm_flags &= ~VM_NOHUGEPAGE;
367 		*vm_flags |= VM_HUGEPAGE;
368 		/*
369 		 * If the vma becomes eligible for khugepaged to scan,
370 		 * register it here without waiting for a page fault
371 		 * that may not happen any time soon.
372 		 */
373 		khugepaged_enter_vma(vma, *vm_flags);
374 		break;
375 	case MADV_NOHUGEPAGE:
376 		*vm_flags &= ~VM_HUGEPAGE;
377 		*vm_flags |= VM_NOHUGEPAGE;
378 		/*
379 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
380 		 * this vma, even if the mm stays registered in khugepaged
381 		 * (having been registered before VM_NOHUGEPAGE was set).
382 		 */
383 		break;
384 	}
385 
386 	return 0;
387 }
388 
389 int __init khugepaged_init(void)
390 {
391 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
392 					  sizeof(struct khugepaged_mm_slot),
393 					  __alignof__(struct khugepaged_mm_slot),
394 					  0, NULL);
395 	if (!mm_slot_cache)
396 		return -ENOMEM;
397 
398 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
399 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
400 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
401 	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
402 
403 	return 0;
404 }
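
/*
 * Editorial note: on a common configuration with 4K base pages and 2M
 * PMD-sized huge pages (HPAGE_PMD_NR == 512), the defaults above work
 * out to:
 *
 *   khugepaged_pages_to_scan   = 512 * 8 = 4096
 *   khugepaged_max_ptes_none   = 511
 *   khugepaged_max_ptes_swap   = 64
 *   khugepaged_max_ptes_shared = 256
 *
 * Configurations with other page sizes scale these accordingly.
 */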
405 
406 void __init khugepaged_destroy(void)
407 {
408 	kmem_cache_destroy(mm_slot_cache);
409 }
410 
411 static inline int hpage_collapse_test_exit(struct mm_struct *mm)
412 {
413 	return atomic_read(&mm->mm_users) == 0;
414 }
415 
416 void __khugepaged_enter(struct mm_struct *mm)
417 {
418 	struct khugepaged_mm_slot *mm_slot;
419 	struct mm_slot *slot;
420 	int wakeup;
421 
422 	mm_slot = mm_slot_alloc(mm_slot_cache);
423 	if (!mm_slot)
424 		return;
425 
426 	slot = &mm_slot->slot;
427 
428 	/* __khugepaged_exit() must not run from under us */
429 	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
430 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
431 		mm_slot_free(mm_slot_cache, mm_slot);
432 		return;
433 	}
434 
435 	spin_lock(&khugepaged_mm_lock);
436 	mm_slot_insert(mm_slots_hash, mm, slot);
437 	/*
438 	 * Insert just behind the scanning cursor, to let the area settle
439 	 * down a little.
440 	 */
441 	wakeup = list_empty(&khugepaged_scan.mm_head);
442 	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
443 	spin_unlock(&khugepaged_mm_lock);
444 
445 	mmgrab(mm);
446 	if (wakeup)
447 		wake_up_interruptible(&khugepaged_wait);
448 }
449 
450 void khugepaged_enter_vma(struct vm_area_struct *vma,
451 			  unsigned long vm_flags)
452 {
453 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
454 	    hugepage_flags_enabled()) {
455 		if (hugepage_vma_check(vma, vm_flags, false, false, true))
456 			__khugepaged_enter(vma->vm_mm);
457 	}
458 }
459 
460 void __khugepaged_exit(struct mm_struct *mm)
461 {
462 	struct khugepaged_mm_slot *mm_slot;
463 	struct mm_slot *slot;
464 	int free = 0;
465 
466 	spin_lock(&khugepaged_mm_lock);
467 	slot = mm_slot_lookup(mm_slots_hash, mm);
468 	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
469 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
470 		hash_del(&slot->hash);
471 		list_del(&slot->mm_node);
472 		free = 1;
473 	}
474 	spin_unlock(&khugepaged_mm_lock);
475 
476 	if (free) {
477 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
478 		mm_slot_free(mm_slot_cache, mm_slot);
479 		mmdrop(mm);
480 	} else if (mm_slot) {
481 		/*
482 		 * This is required to serialize against
483 		 * hpage_collapse_test_exit() (which is guaranteed to run
484 		 * under mmap_lock read mode). Stop here (all pagetables will
485 		 * be destroyed after we return) until khugepaged has finished
486 		 * working on the pagetables under the mmap_lock.
487 		 */
488 		mmap_write_lock(mm);
489 		mmap_write_unlock(mm);
490 	}
491 }
492 
493 static void release_pte_page(struct page *page)
494 {
495 	mod_node_page_state(page_pgdat(page),
496 			NR_ISOLATED_ANON + page_is_file_lru(page),
497 			-compound_nr(page));
498 	unlock_page(page);
499 	putback_lru_page(page);
500 }
501 
502 static void release_pte_pages(pte_t *pte, pte_t *_pte,
503 		struct list_head *compound_pagelist)
504 {
505 	struct page *page, *tmp;
506 
507 	while (--_pte >= pte) {
508 		pte_t pteval = *_pte;
509 
510 		page = pte_page(pteval);
511 		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
512 				!PageCompound(page))
513 			release_pte_page(page);
514 	}
515 
516 	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
517 		list_del(&page->lru);
518 		release_pte_page(page);
519 	}
520 }
521 
522 static bool is_refcount_suitable(struct page *page)
523 {
524 	int expected_refcount;
525 
526 	expected_refcount = total_mapcount(page);
527 	if (PageSwapCache(page))
528 		expected_refcount += compound_nr(page);
529 
530 	return page_count(page) == expected_refcount;
531 }
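
/*
 * Editorial example: for a base page mapped by exactly one PTE and not in
 * the swap cache, the expected refcount is 1 (the mapping's reference); if
 * the page is also in the swap cache, compound_nr() == 1 is added, so 2 is
 * expected. Any extra reference, e.g. a GUP pin, makes page_count() exceed
 * the expectation, the check fails, and the collapse is aborted.
 */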
532 
533 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
534 					unsigned long address,
535 					pte_t *pte,
536 					struct collapse_control *cc,
537 					struct list_head *compound_pagelist)
538 {
539 	struct page *page = NULL;
540 	pte_t *_pte;
541 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
542 	bool writable = false;
543 
544 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
545 	     _pte++, address += PAGE_SIZE) {
546 		pte_t pteval = *_pte;
547 		if (pte_none(pteval) || (pte_present(pteval) &&
548 				is_zero_pfn(pte_pfn(pteval)))) {
549 			++none_or_zero;
550 			if (!userfaultfd_armed(vma) &&
551 			    (!cc->is_khugepaged ||
552 			     none_or_zero <= khugepaged_max_ptes_none)) {
553 				continue;
554 			} else {
555 				result = SCAN_EXCEED_NONE_PTE;
556 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
557 				goto out;
558 			}
559 		}
560 		if (!pte_present(pteval)) {
561 			result = SCAN_PTE_NON_PRESENT;
562 			goto out;
563 		}
564 		page = vm_normal_page(vma, address, pteval);
565 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
566 			result = SCAN_PAGE_NULL;
567 			goto out;
568 		}
569 
570 		VM_BUG_ON_PAGE(!PageAnon(page), page);
571 
572 		if (page_mapcount(page) > 1) {
573 			++shared;
574 			if (cc->is_khugepaged &&
575 			    shared > khugepaged_max_ptes_shared) {
576 				result = SCAN_EXCEED_SHARED_PTE;
577 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
578 				goto out;
579 			}
580 		}
581 
582 		if (PageCompound(page)) {
583 			struct page *p;
584 			page = compound_head(page);
585 
586 			/*
587 			 * Check if we have dealt with the compound page
588 			 * already
589 			 */
590 			list_for_each_entry(p, compound_pagelist, lru) {
591 				if (page == p)
592 					goto next;
593 			}
594 		}
595 
596 		/*
597 		 * We can do it before isolate_lru_page because the
598 		 * page can't be freed from under us. NOTE: PG_lock
599 		 * is needed to serialize against split_huge_page
600 		 * when invoked from the VM.
601 		 */
602 		if (!trylock_page(page)) {
603 			result = SCAN_PAGE_LOCK;
604 			goto out;
605 		}
606 
607 		/*
608 		 * Check if the page has any GUP (or other external) pins.
609 		 *
610 		 * The page table that maps the page has already been unlinked
611 		 * from the page table tree and this process cannot get
612 		 * an additional pin on the page.
613 		 *
614 		 * New pins can come later if the page is shared across fork,
615 		 * but not from this process. The other process cannot write to
616 		 * the page, only trigger CoW.
617 		 */
618 		if (!is_refcount_suitable(page)) {
619 			unlock_page(page);
620 			result = SCAN_PAGE_COUNT;
621 			goto out;
622 		}
623 
624 		/*
625 		 * Isolate the page to avoid collapsing a hugepage
626 		 * currently in use by the VM.
627 		 */
628 		if (isolate_lru_page(page)) {
629 			unlock_page(page);
630 			result = SCAN_DEL_PAGE_LRU;
631 			goto out;
632 		}
633 		mod_node_page_state(page_pgdat(page),
634 				NR_ISOLATED_ANON + page_is_file_lru(page),
635 				compound_nr(page));
636 		VM_BUG_ON_PAGE(!PageLocked(page), page);
637 		VM_BUG_ON_PAGE(PageLRU(page), page);
638 
639 		if (PageCompound(page))
640 			list_add_tail(&page->lru, compound_pagelist);
641 next:
642 		/*
643 		 * If collapse was initiated by khugepaged, check that there are
644 		 * enough young ptes to justify collapsing the page
645 		 */
646 		if (cc->is_khugepaged &&
647 		    (pte_young(pteval) || page_is_young(page) ||
648 		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
649 								     address)))
650 			referenced++;
651 
652 		if (pte_write(pteval))
653 			writable = true;
654 	}
655 
656 	if (unlikely(!writable)) {
657 		result = SCAN_PAGE_RO;
658 	} else if (unlikely(cc->is_khugepaged && !referenced)) {
659 		result = SCAN_LACK_REFERENCED_PAGE;
660 	} else {
661 		result = SCAN_SUCCEED;
662 		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
663 						    referenced, writable, result);
664 		return result;
665 	}
666 out:
667 	release_pte_pages(pte, _pte, compound_pagelist);
668 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
669 					    referenced, writable, result);
670 	return result;
671 }
672 
673 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
674 				      struct vm_area_struct *vma,
675 				      unsigned long address,
676 				      spinlock_t *ptl,
677 				      struct list_head *compound_pagelist)
678 {
679 	struct page *src_page, *tmp;
680 	pte_t *_pte;
681 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
682 				_pte++, page++, address += PAGE_SIZE) {
683 		pte_t pteval = *_pte;
684 
685 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
686 			clear_user_highpage(page, address);
687 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
688 			if (is_zero_pfn(pte_pfn(pteval))) {
689 				/*
690 				 * ptl mostly unnecessary.
691 				 */
692 				spin_lock(ptl);
693 				ptep_clear(vma->vm_mm, address, _pte);
694 				spin_unlock(ptl);
695 			}
696 		} else {
697 			src_page = pte_page(pteval);
698 			copy_user_highpage(page, src_page, address, vma);
699 			if (!PageCompound(src_page))
700 				release_pte_page(src_page);
701 			/*
702 			 * ptl mostly unnecessary, but preempt has to
703 			 * be disabled to update the per-cpu stats
704 			 * inside page_remove_rmap().
705 			 */
706 			spin_lock(ptl);
707 			ptep_clear(vma->vm_mm, address, _pte);
708 			page_remove_rmap(src_page, vma, false);
709 			spin_unlock(ptl);
710 			free_page_and_swap_cache(src_page);
711 		}
712 	}
713 
714 	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
715 		list_del(&src_page->lru);
716 		mod_node_page_state(page_pgdat(src_page),
717 				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
718 				    -compound_nr(src_page));
719 		unlock_page(src_page);
720 		free_swap_cache(src_page);
721 		putback_lru_page(src_page);
722 	}
723 }
724 
725 static void khugepaged_alloc_sleep(void)
726 {
727 	DEFINE_WAIT(wait);
728 
729 	add_wait_queue(&khugepaged_wait, &wait);
730 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
731 	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
732 	remove_wait_queue(&khugepaged_wait, &wait);
733 }
734 
735 struct collapse_control khugepaged_collapse_control = {
736 	.is_khugepaged = true,
737 };
738 
739 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
740 {
741 	int i;
742 
743 	/*
744 	 * If node_reclaim_mode is disabled, then no extra effort is made to
745 	 * allocate memory locally.
746 	 */
747 	if (!node_reclaim_enabled())
748 		return false;
749 
750 	/* If there is a count for this node already, it must be acceptable */
751 	if (cc->node_load[nid])
752 		return false;
753 
754 	for (i = 0; i < MAX_NUMNODES; i++) {
755 		if (!cc->node_load[i])
756 			continue;
757 		if (node_distance(nid, i) > node_reclaim_distance)
758 			return true;
759 	}
760 	return false;
761 }
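
/*
 * Editorial example: with node_reclaim_mode enabled and pages already
 * counted on node 0, scanning a page on node 1 aborts this scan
 * (SCAN_SCAN_ABORT) only if node_distance(1, 0) exceeds
 * node_reclaim_distance (RECLAIM_DISTANCE, typically 30, unless
 * overridden). Nearby nodes are tolerated; a distant mix of nodes is
 * not worth a remote hugepage allocation.
 */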
762 
763 #define khugepaged_defrag()					\
764 	(transparent_hugepage_flags &				\
765 	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
766 
767 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
768 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
769 {
770 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
771 }
772 
773 #ifdef CONFIG_NUMA
774 static int hpage_collapse_find_target_node(struct collapse_control *cc)
775 {
776 	int nid, target_node = 0, max_value = 0;
777 
778 	/* find the first node with the most normal pages hit */
779 	for (nid = 0; nid < MAX_NUMNODES; nid++)
780 		if (cc->node_load[nid] > max_value) {
781 			max_value = cc->node_load[nid];
782 			target_node = nid;
783 		}
784 
785 	for_each_online_node(nid) {
786 		if (max_value == cc->node_load[nid])
787 			node_set(nid, cc->alloc_nmask);
788 	}
789 
790 	return target_node;
791 }
792 #else
793 static int hpage_collapse_find_target_node(struct collapse_control *cc)
794 {
795 	return 0;
796 }
797 #endif
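
/*
 * Editorial example: if a scan filled cc->node_load = { 0: 384, 1: 128 },
 * the NUMA variant above returns node 0 and sets node 0 (plus any node
 * tied at the maximum) in cc->alloc_nmask, so the hugepage allocation
 * prefers the node holding most of the scanned pages, with ties kept as
 * fallback candidates.
 */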
798 
799 static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
800 				      nodemask_t *nmask)
801 {
802 	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
803 	if (unlikely(!*hpage)) {
804 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
805 		return false;
806 	}
807 
808 	prep_transhuge_page(*hpage);
809 	count_vm_event(THP_COLLAPSE_ALLOC);
810 	return true;
811 }
812 
813 /*
814  * If the mmap_lock was temporarily dropped, revalidate the vma
815  * after re-taking it.
816  * Returns enum scan_result value.
817  */
818 
819 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
820 				   bool expect_anon,
821 				   struct vm_area_struct **vmap,
822 				   struct collapse_control *cc)
823 {
824 	struct vm_area_struct *vma;
825 
826 	if (unlikely(hpage_collapse_test_exit(mm)))
827 		return SCAN_ANY_PROCESS;
828 
829 	*vmap = vma = find_vma(mm, address);
830 	if (!vma)
831 		return SCAN_VMA_NULL;
832 
833 	if (!transhuge_vma_suitable(vma, address))
834 		return SCAN_ADDRESS_RANGE;
835 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
836 				cc->is_khugepaged))
837 		return SCAN_VMA_CHECK;
838 	/*
839 	 * Anon VMA expected, the address may be unmapped then
840 	 * remapped to file after khugepaged reaquired the mmap_lock.
841 	 *
842 	 * hugepage_vma_check may return true for qualified file
843 	 * vmas.
844 	 */
845 	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
846 		return SCAN_PAGE_ANON;
847 	return SCAN_SUCCEED;
848 }
849 
850 static int find_pmd_or_thp_or_none(struct mm_struct *mm,
851 				   unsigned long address,
852 				   pmd_t **pmd)
853 {
854 	pmd_t pmde;
855 
856 	*pmd = mm_find_pmd(mm, address);
857 	if (!*pmd)
858 		return SCAN_PMD_NULL;
859 
860 	pmde = pmdp_get_lockless(*pmd);
861 
862 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
863 	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
864 	barrier();
865 #endif
866 	if (pmd_none(pmde))
867 		return SCAN_PMD_NONE;
868 	if (pmd_trans_huge(pmde))
869 		return SCAN_PMD_MAPPED;
870 	if (pmd_bad(pmde))
871 		return SCAN_PMD_NULL;
872 	return SCAN_SUCCEED;
873 }
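
/*
 * Editorial note on the returns above: SCAN_PMD_NULL covers a missing or
 * corrupt (pmd_bad) pmd, SCAN_PMD_NONE a pmd with no page table installed
 * yet, and SCAN_PMD_MAPPED a pmd that already maps a huge page; only
 * SCAN_SUCCEED means a normal pte table is present and worth scanning.
 */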
874 
875 static int check_pmd_still_valid(struct mm_struct *mm,
876 				 unsigned long address,
877 				 pmd_t *pmd)
878 {
879 	pmd_t *new_pmd;
880 	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
881 
882 	if (result != SCAN_SUCCEED)
883 		return result;
884 	if (new_pmd != pmd)
885 		return SCAN_FAIL;
886 	return SCAN_SUCCEED;
887 }
888 
889 /*
890  * Bring missing pages in from swap, to complete THP collapse.
891  * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
892  *
893  * Called and returns without pte mapped or spinlocks held.
894  * Note that on any failure return, mmap_lock will have been released.
895  */
896 
897 static int __collapse_huge_page_swapin(struct mm_struct *mm,
898 				       struct vm_area_struct *vma,
899 				       unsigned long haddr, pmd_t *pmd,
900 				       int referenced)
901 {
902 	int swapped_in = 0;
903 	vm_fault_t ret = 0;
904 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
905 
906 	for (address = haddr; address < end; address += PAGE_SIZE) {
907 		struct vm_fault vmf = {
908 			.vma = vma,
909 			.address = address,
910 			.pgoff = linear_page_index(vma, haddr),
911 			.flags = FAULT_FLAG_ALLOW_RETRY,
912 			.pmd = pmd,
913 		};
914 
915 		vmf.pte = pte_offset_map(pmd, address);
916 		vmf.orig_pte = *vmf.pte;
917 		if (!is_swap_pte(vmf.orig_pte)) {
918 			pte_unmap(vmf.pte);
919 			continue;
920 		}
921 		ret = do_swap_page(&vmf);
922 
923 		/*
924 		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
925 		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
926 		 * we do not retry here, and the swap entry will remain in the
927 		 * pagetable, resulting in later failure.
928 		 */
929 		if (ret & VM_FAULT_RETRY) {
930 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
931 			/* Likely, but not guaranteed, that page lock failed */
932 			return SCAN_PAGE_LOCK;
933 		}
934 		if (ret & VM_FAULT_ERROR) {
935 			mmap_read_unlock(mm);
936 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
937 			return SCAN_FAIL;
938 		}
939 		swapped_in++;
940 	}
941 
942 	/* Drain LRU add pagevec to remove the extra pin on the swapped-in pages */
943 	if (swapped_in)
944 		lru_add_drain();
945 
946 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
947 	return SCAN_SUCCEED;
948 }
949 
950 static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
951 			      struct collapse_control *cc)
952 {
953 	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
954 		     GFP_TRANSHUGE);
955 	int node = hpage_collapse_find_target_node(cc);
956 
957 	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
958 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
959 	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
960 		return SCAN_CGROUP_CHARGE_FAIL;
961 	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
962 	return SCAN_SUCCEED;
963 }
964 
965 static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
966 			      int referenced, int unmapped,
967 			      struct collapse_control *cc)
968 {
969 	LIST_HEAD(compound_pagelist);
970 	pmd_t *pmd, _pmd;
971 	pte_t *pte;
972 	pgtable_t pgtable;
973 	struct page *hpage;
974 	spinlock_t *pmd_ptl, *pte_ptl;
975 	int result = SCAN_FAIL;
976 	struct vm_area_struct *vma;
977 	struct mmu_notifier_range range;
978 
979 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
980 
981 	/*
982 	 * Before allocating the hugepage, release the mmap_lock read lock.
983 	 * The allocation can take potentially a long time if it involves
984 	 * sync compaction, and we do not need to hold the mmap_lock during
985 	 * that. We will recheck the vma after taking it again in write mode.
986 	 */
987 	mmap_read_unlock(mm);
988 
989 	result = alloc_charge_hpage(&hpage, mm, cc);
990 	if (result != SCAN_SUCCEED)
991 		goto out_nolock;
992 
993 	mmap_read_lock(mm);
994 	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
995 	if (result != SCAN_SUCCEED) {
996 		mmap_read_unlock(mm);
997 		goto out_nolock;
998 	}
999 
1000 	result = find_pmd_or_thp_or_none(mm, address, &pmd);
1001 	if (result != SCAN_SUCCEED) {
1002 		mmap_read_unlock(mm);
1003 		goto out_nolock;
1004 	}
1005 
1006 	if (unmapped) {
1007 		/*
1008 		 * __collapse_huge_page_swapin will return with mmap_lock
1009 		 * released when it fails. So we jump to out_nolock directly in
1010 		 * that case.  Continuing to collapse causes inconsistency.
1011 		 */
1012 		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1013 						     referenced);
1014 		if (result != SCAN_SUCCEED)
1015 			goto out_nolock;
1016 	}
1017 
1018 	mmap_read_unlock(mm);
1019 	/*
1020 	 * Prevent all access to the pagetables, with the exception of
1021 	 * gup_fast (handled later by the ptep_clear_flush) and the VM's
1022 	 * rmap walks (handled by the anon_vma lock + PG_lock).
1023 	 */
1024 	mmap_write_lock(mm);
1025 	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1026 	if (result != SCAN_SUCCEED)
1027 		goto out_up_write;
1028 	/* check if the pmd is still valid */
1029 	result = check_pmd_still_valid(mm, address, pmd);
1030 	if (result != SCAN_SUCCEED)
1031 		goto out_up_write;
1032 
1033 	anon_vma_lock_write(vma->anon_vma);
1034 
1035 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1036 				address, address + HPAGE_PMD_SIZE);
1037 	mmu_notifier_invalidate_range_start(&range);
1038 
1039 	pte = pte_offset_map(pmd, address);
1040 	pte_ptl = pte_lockptr(mm, pmd);
1041 
1042 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1043 	/*
1044 	 * This removes any huge TLB entry from the CPU so we won't allow
1045 	 * huge and small TLB entries for the same virtual address to
1046 	 * avoid the risk of CPU bugs in that area.
1047 	 *
1048 	 * Parallel fast GUP is fine since fast GUP will back off when
1049 	 * it detects PMD is changed.
1050 	 */
1051 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1052 	spin_unlock(pmd_ptl);
1053 	mmu_notifier_invalidate_range_end(&range);
1054 	tlb_remove_table_sync_one();
1055 
1056 	spin_lock(pte_ptl);
1057 	result = __collapse_huge_page_isolate(vma, address, pte, cc,
1058 					       &compound_pagelist);
1059 	spin_unlock(pte_ptl);
1060 
1061 	if (unlikely(result != SCAN_SUCCEED)) {
1062 		pte_unmap(pte);
1063 		spin_lock(pmd_ptl);
1064 		BUG_ON(!pmd_none(*pmd));
1065 		/*
1066 		 * We can only use set_pmd_at when establishing
1067 		 * hugepmds and never for establishing regular pmds that
1068 		 * point to regular pagetables. Use pmd_populate for that.
1069 		 */
1070 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1071 		spin_unlock(pmd_ptl);
1072 		anon_vma_unlock_write(vma->anon_vma);
1073 		goto out_up_write;
1074 	}
1075 
1076 	/*
1077 	 * All pages are isolated and locked so anon_vma rmap
1078 	 * can't run anymore.
1079 	 */
1080 	anon_vma_unlock_write(vma->anon_vma);
1081 
1082 	__collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
1083 				  &compound_pagelist);
1084 	pte_unmap(pte);
1085 	/*
1086 	 * spin_lock() below is not the equivalent of smp_wmb(), but
1087 	 * the smp_wmb() inside __SetPageUptodate() can be reused to
1088 	 * prevent the copy_huge_page writes from becoming visible after
1089 	 * the set_pmd_at() write.
1090 	 */
1091 	__SetPageUptodate(hpage);
1092 	pgtable = pmd_pgtable(_pmd);
1093 
1094 	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1095 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1096 
1097 	spin_lock(pmd_ptl);
1098 	BUG_ON(!pmd_none(*pmd));
1099 	page_add_new_anon_rmap(hpage, vma, address);
1100 	lru_cache_add_inactive_or_unevictable(hpage, vma);
1101 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1102 	set_pmd_at(mm, address, pmd, _pmd);
1103 	update_mmu_cache_pmd(vma, address, pmd);
1104 	spin_unlock(pmd_ptl);
1105 
1106 	hpage = NULL;
1107 
1108 	result = SCAN_SUCCEED;
1109 out_up_write:
1110 	mmap_write_unlock(mm);
1111 out_nolock:
1112 	if (hpage) {
1113 		mem_cgroup_uncharge(page_folio(hpage));
1114 		put_page(hpage);
1115 	}
1116 	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1117 	return result;
1118 }
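
/*
 * Editorial recap of collapse_huge_page() above: allocate and charge the
 * hugepage without mmap_lock held, revalidate under the read lock, swap in
 * any missing pages, then upgrade to the write lock, unlink the pte table
 * (pmdp_collapse_flush), isolate and copy the HPAGE_PMD_NR small pages,
 * and finally install the huge pmd. A failure at the isolate step
 * repopulates the original pte table via pmd_populate().
 */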
1119 
1120 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1121 				   struct vm_area_struct *vma,
1122 				   unsigned long address, bool *mmap_locked,
1123 				   struct collapse_control *cc)
1124 {
1125 	pmd_t *pmd;
1126 	pte_t *pte, *_pte;
1127 	int result = SCAN_FAIL, referenced = 0;
1128 	int none_or_zero = 0, shared = 0;
1129 	struct page *page = NULL;
1130 	unsigned long _address;
1131 	spinlock_t *ptl;
1132 	int node = NUMA_NO_NODE, unmapped = 0;
1133 	bool writable = false;
1134 
1135 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1136 
1137 	result = find_pmd_or_thp_or_none(mm, address, &pmd);
1138 	if (result != SCAN_SUCCEED)
1139 		goto out;
1140 
1141 	memset(cc->node_load, 0, sizeof(cc->node_load));
1142 	nodes_clear(cc->alloc_nmask);
1143 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1144 	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1145 	     _pte++, _address += PAGE_SIZE) {
1146 		pte_t pteval = *_pte;
1147 		if (is_swap_pte(pteval)) {
1148 			++unmapped;
1149 			if (!cc->is_khugepaged ||
1150 			    unmapped <= khugepaged_max_ptes_swap) {
1151 				/*
1152 				 * Always be strict with uffd-wp
1153 				 * enabled swap entries.  Please see
1154 				 * comment below for pte_uffd_wp().
1155 				 */
1156 				if (pte_swp_uffd_wp(pteval)) {
1157 					result = SCAN_PTE_UFFD_WP;
1158 					goto out_unmap;
1159 				}
1160 				continue;
1161 			} else {
1162 				result = SCAN_EXCEED_SWAP_PTE;
1163 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1164 				goto out_unmap;
1165 			}
1166 		}
1167 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1168 			++none_or_zero;
1169 			if (!userfaultfd_armed(vma) &&
1170 			    (!cc->is_khugepaged ||
1171 			     none_or_zero <= khugepaged_max_ptes_none)) {
1172 				continue;
1173 			} else {
1174 				result = SCAN_EXCEED_NONE_PTE;
1175 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1176 				goto out_unmap;
1177 			}
1178 		}
1179 		if (pte_uffd_wp(pteval)) {
1180 			/*
1181 			 * Don't collapse the page if any of the small
1182 			 * PTEs are armed with uffd write protection.
1183 			 * We could instead mark the new huge pmd as
1184 			 * write protected if any of the small ones is
1185 			 * marked, but that could deliver unexpected
1186 			 * userfault messages that fall outside of
1187 			 * the registered range.  So, just keep it simple.
1188 			 */
1189 			result = SCAN_PTE_UFFD_WP;
1190 			goto out_unmap;
1191 		}
1192 		if (pte_write(pteval))
1193 			writable = true;
1194 
1195 		page = vm_normal_page(vma, _address, pteval);
1196 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1197 			result = SCAN_PAGE_NULL;
1198 			goto out_unmap;
1199 		}
1200 
1201 		if (page_mapcount(page) > 1) {
1202 			++shared;
1203 			if (cc->is_khugepaged &&
1204 			    shared > khugepaged_max_ptes_shared) {
1205 				result = SCAN_EXCEED_SHARED_PTE;
1206 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1207 				goto out_unmap;
1208 			}
1209 		}
1210 
1211 		page = compound_head(page);
1212 
1213 		/*
1214 		 * Record which node the original page is from and save this
1215 		 * information to cc->node_load[].
1216 		 * Khugepaged will allocate the hugepage from the node that
1217 		 * has the max hit record.
1218 		 */
1219 		node = page_to_nid(page);
1220 		if (hpage_collapse_scan_abort(node, cc)) {
1221 			result = SCAN_SCAN_ABORT;
1222 			goto out_unmap;
1223 		}
1224 		cc->node_load[node]++;
1225 		if (!PageLRU(page)) {
1226 			result = SCAN_PAGE_LRU;
1227 			goto out_unmap;
1228 		}
1229 		if (PageLocked(page)) {
1230 			result = SCAN_PAGE_LOCK;
1231 			goto out_unmap;
1232 		}
1233 		if (!PageAnon(page)) {
1234 			result = SCAN_PAGE_ANON;
1235 			goto out_unmap;
1236 		}
1237 
1238 		/*
1239 		 * Check if the page has any GUP (or other external) pins.
1240 		 *
1241 		 * Here the check may be racy:
1242 		 * it may see total_mapcount > refcount in some cases.
1243 		 * But such a case is ephemeral and we can always retry the
1244 		 * collapse later.  However, it may report a false positive if
1245 		 * the page has excessive GUP pins (i.e. 512).  Anyway, the same
1246 		 * check will be done again later, so the risk seems low.
1247 		 */
1248 		if (!is_refcount_suitable(page)) {
1249 			result = SCAN_PAGE_COUNT;
1250 			goto out_unmap;
1251 		}
1252 
1253 		/*
1254 		 * If collapse was initiated by khugepaged, check that there are
1255 		 * enough young ptes to justify collapsing the page
1256 		 */
1257 		if (cc->is_khugepaged &&
1258 		    (pte_young(pteval) || page_is_young(page) ||
1259 		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1260 								     address)))
1261 			referenced++;
1262 	}
1263 	if (!writable) {
1264 		result = SCAN_PAGE_RO;
1265 	} else if (cc->is_khugepaged &&
1266 		   (!referenced ||
1267 		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1268 		result = SCAN_LACK_REFERENCED_PAGE;
1269 	} else {
1270 		result = SCAN_SUCCEED;
1271 	}
1272 out_unmap:
1273 	pte_unmap_unlock(pte, ptl);
1274 	if (result == SCAN_SUCCEED) {
1275 		result = collapse_huge_page(mm, address, referenced,
1276 					    unmapped, cc);
1277 		/* collapse_huge_page will return with the mmap_lock released */
1278 		*mmap_locked = false;
1279 	}
1280 out:
1281 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1282 				     none_or_zero, result, unmapped);
1283 	return result;
1284 }
1285 
1286 static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1287 {
1288 	struct mm_slot *slot = &mm_slot->slot;
1289 	struct mm_struct *mm = slot->mm;
1290 
1291 	lockdep_assert_held(&khugepaged_mm_lock);
1292 
1293 	if (hpage_collapse_test_exit(mm)) {
1294 		/* free mm_slot */
1295 		hash_del(&slot->hash);
1296 		list_del(&slot->mm_node);
1297 
1298 		/*
1299 		 * Not strictly needed because the mm exited already.
1300 		 *
1301 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1302 		 */
1303 
1304 		/* khugepaged_mm_lock actually not necessary for the below */
1305 		mm_slot_free(mm_slot_cache, mm_slot);
1306 		mmdrop(mm);
1307 	}
1308 }
1309 
1310 #ifdef CONFIG_SHMEM
1311 /*
1312  * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1313  * khugepaged should try to collapse the page table.
1314  *
1315  * Note that following race exists:
1316  * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
1317  *     emptying A's ->pte_mapped_thp[] array.
1318  * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
1319  *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
1320  *     (at virtual address X) and adds an entry (for X) into mm_struct A's
1321  *     ->pte_mapped_thp[] array.
1322  * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
1323  *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
1324  *     (for X) into mm_struct A's ->pte_mapped_thp[] array.
1325  * Thus, it's possible the same address is added multiple times for the same
1326  * mm_struct.  Should this happen, we'll simply attempt
1327  * collapse_pte_mapped_thp() multiple times for the same address, under the same
1328  * exclusive mmap_lock, and assuming the first call is successful, subsequent
1329  * attempts will return quickly (without grabbing any additional locks) when
1330  * a huge pmd is found in find_pmd_or_thp_or_none().  Since this is a cheap
1331  * check, and since this is a rare occurrence, the cost of preventing this
1332  * "multiple-add" is thought to be more expensive than just handling it, should
1333  * it occur.
1334  */
1335 static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1336 					  unsigned long addr)
1337 {
1338 	struct khugepaged_mm_slot *mm_slot;
1339 	struct mm_slot *slot;
1340 	bool ret = false;
1341 
1342 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1343 
1344 	spin_lock(&khugepaged_mm_lock);
1345 	slot = mm_slot_lookup(mm_slots_hash, mm);
1346 	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
1347 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
1348 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1349 		ret = true;
1350 	}
1351 	spin_unlock(&khugepaged_mm_lock);
1352 	return ret;
1353 }
1354 
1355 /* hpage must be locked, and mmap_lock must be held in write */
1356 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1357 			pmd_t *pmdp, struct page *hpage)
1358 {
1359 	struct vm_fault vmf = {
1360 		.vma = vma,
1361 		.address = addr,
1362 		.flags = 0,
1363 		.pmd = pmdp,
1364 	};
1365 
1366 	VM_BUG_ON(!PageTransHuge(hpage));
1367 	mmap_assert_write_locked(vma->vm_mm);
1368 
1369 	if (do_set_pmd(&vmf, hpage))
1370 		return SCAN_FAIL;
1371 
1372 	get_page(hpage);
1373 	return SCAN_SUCCEED;
1374 }
1375 
1376 /*
1377  * A note about locking:
1378  * Trying to take the page table spinlocks would be useless here because those
1379  * are only used to synchronize:
1380  *
1381  *  - modifying terminal entries (ones that point to a data page, not to another
1382  *    page table)
1383  *  - installing *new* non-terminal entries
1384  *
1385  * Instead, we need roughly the same kind of protection as free_pgtables() or
1386  * mm_take_all_locks() (but only for a single VMA):
1387  * The mmap lock together with this VMA's rmap locks covers all paths towards
1388  * the page table entries we're messing with here, except for hardware page
1389  * table walks and lockless_pages_from_mm().
1390  */
1391 static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1392 				  unsigned long addr, pmd_t *pmdp)
1393 {
1394 	pmd_t pmd;
1395 	struct mmu_notifier_range range;
1396 
1397 	mmap_assert_write_locked(mm);
1398 	if (vma->vm_file)
1399 		lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
1400 	/*
1401 	 * All anon_vmas attached to the VMA have the same root and are
1402 	 * therefore locked by the same lock.
1403 	 */
1404 	if (vma->anon_vma)
1405 		lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
1406 
1407 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, addr,
1408 				addr + HPAGE_PMD_SIZE);
1409 	mmu_notifier_invalidate_range_start(&range);
1410 	pmd = pmdp_collapse_flush(vma, addr, pmdp);
1411 	tlb_remove_table_sync_one();
1412 	mmu_notifier_invalidate_range_end(&range);
1413 	mm_dec_nr_ptes(mm);
1414 	page_table_check_pte_clear_range(mm, addr, pmd);
1415 	pte_free(mm, pmd_pgtable(pmd));
1416 }
1417 
1418 /**
1419  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1420  * address haddr.
1421  *
1422  * @mm: process address space where collapse happens
1423  * @addr: THP collapse address
1424  * @install_pmd: If a huge PMD should be installed
1425  *
1426  * This function checks whether all the PTEs in the PMD are pointing to the
1427  * right THP. If so, retract the page table so the THP can refault in with
1428  * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1429  */
1430 int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1431 			    bool install_pmd)
1432 {
1433 	unsigned long haddr = addr & HPAGE_PMD_MASK;
1434 	struct vm_area_struct *vma = vma_lookup(mm, haddr);
1435 	struct page *hpage;
1436 	pte_t *start_pte, *pte;
1437 	pmd_t *pmd;
1438 	spinlock_t *ptl;
1439 	int count = 0, result = SCAN_FAIL;
1440 	int i;
1441 
1442 	mmap_assert_write_locked(mm);
1443 
1444 	/* Fast check before locking page if already PMD-mapped */
1445 	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1446 	if (result == SCAN_PMD_MAPPED)
1447 		return result;
1448 
1449 	if (!vma || !vma->vm_file ||
1450 	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1451 		return SCAN_VMA_CHECK;
1452 
1453 	/*
1454 	 * If we are here, we've succeeded in replacing all the native pages
1455 	 * in the page cache with a single hugepage. If a mm were to fault-in
1456 	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1457 	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1458 	 * analogously elide sysfs THP settings here.
1459 	 */
1460 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
1461 		return SCAN_VMA_CHECK;
1462 
1463 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1464 	if (userfaultfd_wp(vma))
1465 		return SCAN_PTE_UFFD_WP;
1466 
1467 	hpage = find_lock_page(vma->vm_file->f_mapping,
1468 			       linear_page_index(vma, haddr));
1469 	if (!hpage)
1470 		return SCAN_PAGE_NULL;
1471 
1472 	if (!PageHead(hpage)) {
1473 		result = SCAN_FAIL;
1474 		goto drop_hpage;
1475 	}
1476 
1477 	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1478 		result = SCAN_PAGE_COMPOUND;
1479 		goto drop_hpage;
1480 	}
1481 
1482 	switch (result) {
1483 	case SCAN_SUCCEED:
1484 		break;
1485 	case SCAN_PMD_NONE:
1486 		/*
1487 		 * In MADV_COLLAPSE path, possible race with khugepaged where
1488 		 * all pte entries have been removed and pmd cleared.  If so,
1489 		 * skip all the pte checks and just update the pmd mapping.
1490 		 */
1491 		goto maybe_install_pmd;
1492 	default:
1493 		goto drop_hpage;
1494 	}
1495 
1496 	/*
1497 	 * We need to lock the mapping so that from here on, only GUP-fast and
1498 	 * hardware page walks can access the parts of the page tables that
1499 	 * we're operating on.
1500 	 * See collapse_and_free_pmd().
1501 	 */
1502 	i_mmap_lock_write(vma->vm_file->f_mapping);
1503 
1504 	/*
1505 	 * This spinlock should be unnecessary: Nobody else should be accessing
1506 	 * the page tables under spinlock protection here, only
1507 	 * lockless_pages_from_mm() and the hardware page walker can access page
1508 	 * tables while all the high-level locks are held in write mode.
1509 	 */
1510 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1511 	result = SCAN_FAIL;
1512 
1513 	/* step 1: check all mapped PTEs are to the right huge page */
1514 	for (i = 0, addr = haddr, pte = start_pte;
1515 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1516 		struct page *page;
1517 
1518 		/* empty pte, skip */
1519 		if (pte_none(*pte))
1520 			continue;
1521 
1522 		/* page swapped out, abort */
1523 		if (!pte_present(*pte)) {
1524 			result = SCAN_PTE_NON_PRESENT;
1525 			goto abort;
1526 		}
1527 
1528 		page = vm_normal_page(vma, addr, *pte);
1529 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1530 			page = NULL;
1531 		/*
1532 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1533 		 * page table, but the new page will not be a subpage of hpage.
1534 		 */
1535 		if (hpage + i != page)
1536 			goto abort;
1537 		count++;
1538 	}
1539 
1540 	/* step 2: adjust rmap */
1541 	for (i = 0, addr = haddr, pte = start_pte;
1542 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1543 		struct page *page;
1544 
1545 		if (pte_none(*pte))
1546 			continue;
1547 		page = vm_normal_page(vma, addr, *pte);
1548 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1549 			goto abort;
1550 		page_remove_rmap(page, vma, false);
1551 	}
1552 
1553 	pte_unmap_unlock(start_pte, ptl);
1554 
1555 	/* step 3: set proper refcount and mm_counters. */
1556 	if (count) {
1557 		page_ref_sub(hpage, count);
1558 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1559 	}
1560 
1561 	/* step 4: remove pte entries */
1562 	/* we make no change to anon, but protect concurrent anon page lookup */
1563 	if (vma->anon_vma)
1564 		anon_vma_lock_write(vma->anon_vma);
1565 
1566 	collapse_and_free_pmd(mm, vma, haddr, pmd);
1567 
1568 	if (vma->anon_vma)
1569 		anon_vma_unlock_write(vma->anon_vma);
1570 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1571 
1572 maybe_install_pmd:
1573 	/* step 5: install pmd entry */
1574 	result = install_pmd
1575 			? set_huge_pmd(vma, haddr, pmd, hpage)
1576 			: SCAN_SUCCEED;
1577 
1578 drop_hpage:
1579 	unlock_page(hpage);
1580 	put_page(hpage);
1581 	return result;
1582 
1583 abort:
1584 	pte_unmap_unlock(start_pte, ptl);
1585 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1586 	goto drop_hpage;
1587 }
1588 
1589 static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
1590 {
1591 	struct mm_slot *slot = &mm_slot->slot;
1592 	struct mm_struct *mm = slot->mm;
1593 	int i;
1594 
1595 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1596 		return;
1597 
1598 	if (!mmap_write_trylock(mm))
1599 		return;
1600 
1601 	if (unlikely(hpage_collapse_test_exit(mm)))
1602 		goto out;
1603 
1604 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1605 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
1606 
1607 out:
1608 	mm_slot->nr_pte_mapped_thp = 0;
1609 	mmap_write_unlock(mm);
1610 }
1611 
1612 static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1613 			       struct mm_struct *target_mm,
1614 			       unsigned long target_addr, struct page *hpage,
1615 			       struct collapse_control *cc)
1616 {
1617 	struct vm_area_struct *vma;
1618 	int target_result = SCAN_FAIL;
1619 
1620 	i_mmap_lock_write(mapping);
1621 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1622 		int result = SCAN_FAIL;
1623 		struct mm_struct *mm = NULL;
1624 		unsigned long addr = 0;
1625 		pmd_t *pmd;
1626 		bool is_target = false;
1627 
1628 		/*
1629 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1630 		 * got written to. These VMAs are likely not worth the cost of
1631 		 * taking mmap_write_lock(mm), as the PMD-mapping is likely to
1632 		 * be split later.
1633 		 *
1634 		 * Note that vma->anon_vma check is racy: it can be set up after
1635 		 * the check but before we took mmap_lock by the fault path.
1636 		 * But page lock would prevent establishing any new ptes of the
1637 		 * page, so we are safe.
1638 		 *
1639 		 * An alternative would be to drop the check, but check that the page
1640 		 * table is clear before calling pmdp_collapse_flush() under
1641 		 * ptl. It has higher chance to recover THP for the VMA, but
1642 		 * has higher cost too. It would also probably require locking
1643 		 * the anon_vma.
1644 		 */
1645 		if (vma->anon_vma) {
1646 			result = SCAN_PAGE_ANON;
1647 			goto next;
1648 		}
1649 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1650 		if (addr & ~HPAGE_PMD_MASK ||
1651 		    vma->vm_end < addr + HPAGE_PMD_SIZE) {
1652 			result = SCAN_VMA_CHECK;
1653 			goto next;
1654 		}
1655 		mm = vma->vm_mm;
1656 		is_target = mm == target_mm && addr == target_addr;
1657 		result = find_pmd_or_thp_or_none(mm, addr, &pmd);
1658 		if (result != SCAN_SUCCEED)
1659 			goto next;
1660 		/*
1661 		 * We need exclusive mmap_lock to retract page table.
1662 		 *
1663 		 * We use trylock due to lock inversion: we need to acquire
1664 		 * mmap_lock while holding page lock. Fault path does it in
1665 		 * reverse order. Trylock is a way to avoid deadlock.
1666 		 *
1667 		 * Also, it's not MADV_COLLAPSE's job to collapse other
1668 		 * mappings - let khugepaged take care of them later.
1669 		 */
1670 		result = SCAN_PTE_MAPPED_HUGEPAGE;
1671 		if ((cc->is_khugepaged || is_target) &&
1672 		    mmap_write_trylock(mm)) {
1673 			/*
1674 			 * When a vma is registered with uffd-wp, we can't
1675 			 * recycle the pmd pgtable because there can be pte
1676 			 * markers installed.  Skip only this vma, so the rest of
1677 			 * the mm/vmas can still have the same file mapped hugely;
1678 			 * however, it'll always be mapped with small pages for
1679 			 * uffd-wp registered ranges.
1680 			 */
1681 			if (hpage_collapse_test_exit(mm)) {
1682 				result = SCAN_ANY_PROCESS;
1683 				goto unlock_next;
1684 			}
1685 			if (userfaultfd_wp(vma)) {
1686 				result = SCAN_PTE_UFFD_WP;
1687 				goto unlock_next;
1688 			}
1689 			collapse_and_free_pmd(mm, vma, addr, pmd);
1690 			if (!cc->is_khugepaged && is_target)
1691 				result = set_huge_pmd(vma, addr, pmd, hpage);
1692 			else
1693 				result = SCAN_SUCCEED;
1694 
1695 unlock_next:
1696 			mmap_write_unlock(mm);
1697 			goto next;
1698 		}
1699 		/*
1700 		 * Calling context will handle target mm/addr. Otherwise, let
1701 		 * khugepaged try again later.
1702 		 */
1703 		if (!is_target) {
1704 			khugepaged_add_pte_mapped_thp(mm, addr);
1705 			continue;
1706 		}
1707 next:
1708 		if (is_target)
1709 			target_result = result;
1710 	}
1711 	i_mmap_unlock_write(mapping);
1712 	return target_result;
1713 }
1714 
1715 /**
1716  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1717  *
1718  * @mm: process address space where collapse happens
1719  * @addr: virtual collapse start address
1720  * @file: the file that the collapse operates on
1721  * @start: collapse start page index in the file
1722  * @cc: collapse context and scratchpad
1723  *
1724  * Basic scheme is simple, details are more complex:
1725  *  - allocate and lock a new huge page;
1726  *  - scan page cache replacing old pages with the new one
1727  *    + swap/gup in pages if necessary;
1728  *    + fill in gaps;
1729  *    + keep old pages around in case rollback is required;
1730  *  - if replacing succeeds:
1731  *    + copy data over;
1732  *    + free old pages;
1733  *    + unlock huge page;
1734  *  - if replacing fails:
1735  *    + put all pages back and unfreeze them;
1736  *    + restore gaps in the page cache;
1737  *    + unlock and free huge page;
1738  */
1739 static int collapse_file(struct mm_struct *mm, unsigned long addr,
1740 			 struct file *file, pgoff_t start,
1741 			 struct collapse_control *cc)
1742 {
1743 	struct address_space *mapping = file->f_mapping;
1744 	struct page *hpage;
1745 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1746 	LIST_HEAD(pagelist);
1747 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1748 	int nr_none = 0, result = SCAN_SUCCEED;
1749 	bool is_shmem = shmem_file(file);
1750 	int nr = 0;
1751 
1752 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1753 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1754 
1755 	result = alloc_charge_hpage(&hpage, mm, cc);
1756 	if (result != SCAN_SUCCEED)
1757 		goto out;
1758 
1759 	/*
1760 	 * Ensure we have slots for all the pages in the range.  This is
1761 	 * almost certainly a no-op because most of the pages must be present.
1762 	 */
1763 	do {
1764 		xas_lock_irq(&xas);
1765 		xas_create_range(&xas);
1766 		if (!xas_error(&xas))
1767 			break;
1768 		xas_unlock_irq(&xas);
1769 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1770 			result = SCAN_FAIL;
1771 			goto out;
1772 		}
1773 	} while (1);
1774 
1775 	__SetPageLocked(hpage);
1776 	if (is_shmem)
1777 		__SetPageSwapBacked(hpage);
1778 	hpage->index = start;
1779 	hpage->mapping = mapping;
1780 
1781 	/*
1782 	 * At this point the hpage is locked and not up-to-date.
1783 	 * It's safe to insert it into the page cache, because nobody would
1784 	 * be able to map it or use it in another way until we unlock it.
1785 	 */
1786 
1787 	xas_set(&xas, start);
1788 	for (index = start; index < end; index++) {
1789 		struct page *page = xas_next(&xas);
1790 		struct folio *folio;
1791 
1792 		VM_BUG_ON(index != xas.xa_index);
1793 		if (is_shmem) {
1794 			if (!page) {
1795 				/*
1796 				 * Stop if extent has been truncated or
1797 				 * hole-punched, and is now completely
1798 				 * empty.
1799 				 */
1800 				if (index == start) {
1801 					if (!xas_next_entry(&xas, end - 1)) {
1802 						result = SCAN_TRUNCATED;
1803 						goto xa_locked;
1804 					}
1805 					xas_set(&xas, index);
1806 				}
1807 				if (!shmem_charge(mapping->host, 1)) {
1808 					result = SCAN_FAIL;
1809 					goto xa_locked;
1810 				}
1811 				xas_store(&xas, hpage);
1812 				nr_none++;
1813 				continue;
1814 			}
1815 
1816 			if (xa_is_value(page) || !PageUptodate(page)) {
1817 				xas_unlock_irq(&xas);
1818 				/* swap in or instantiate fallocated page */
1819 				if (shmem_get_folio(mapping->host, index,
1820 						&folio, SGP_NOALLOC)) {
1821 					result = SCAN_FAIL;
1822 					goto xa_unlocked;
1823 				}
1824 				page = folio_file_page(folio, index);
1825 			} else if (trylock_page(page)) {
1826 				get_page(page);
1827 				xas_unlock_irq(&xas);
1828 			} else {
1829 				result = SCAN_PAGE_LOCK;
1830 				goto xa_locked;
1831 			}
1832 		} else {	/* !is_shmem */
1833 			if (!page || xa_is_value(page)) {
1834 				xas_unlock_irq(&xas);
1835 				page_cache_sync_readahead(mapping, &file->f_ra,
1836 							  file, index,
1837 							  end - index);
1838 				/* drain pagevecs to help isolate_lru_page() */
1839 				lru_add_drain();
1840 				page = find_lock_page(mapping, index);
1841 				if (unlikely(page == NULL)) {
1842 					result = SCAN_FAIL;
1843 					goto xa_unlocked;
1844 				}
1845 			} else if (PageDirty(page)) {
1846 				/*
1847 				 * khugepaged only works on read-only fd,
1848 				 * so this page is dirty because it hasn't
1849 				 * been flushed since first write. There
1850 				 * won't be new dirty pages.
1851 				 *
1852 				 * Trigger async flush here and hope the
1853 				 * writeback is done when khugepaged
1854 				 * revisits this page.
1855 				 *
1856 				 * This is a one-off situation. We are not
1857 				 * forcing writeback in a loop.
1858 				 */
1859 				xas_unlock_irq(&xas);
1860 				filemap_flush(mapping);
1861 				result = SCAN_FAIL;
1862 				goto xa_unlocked;
1863 			} else if (PageWriteback(page)) {
1864 				xas_unlock_irq(&xas);
1865 				result = SCAN_FAIL;
1866 				goto xa_unlocked;
1867 			} else if (trylock_page(page)) {
1868 				get_page(page);
1869 				xas_unlock_irq(&xas);
1870 			} else {
1871 				result = SCAN_PAGE_LOCK;
1872 				goto xa_locked;
1873 			}
1874 		}
1875 
1876 		/*
1877 		 * The page must be locked, so we can drop the i_pages lock
1878 		 * without racing with truncate.
1879 		 */
1880 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1881 
1882 		/* make sure the page is up to date */
1883 		if (unlikely(!PageUptodate(page))) {
1884 			result = SCAN_FAIL;
1885 			goto out_unlock;
1886 		}
1887 
1888 		/*
1889 		 * If file was truncated then extended, or hole-punched, before
1890 		 * we locked the first page, then a THP might be there already.
1891 		 * This will be discovered on the first iteration.
1892 		 */
1893 		if (PageTransCompound(page)) {
1894 			struct page *head = compound_head(page);
1895 
1896 			result = compound_order(head) == HPAGE_PMD_ORDER &&
1897 					head->index == start
1898 					/* Maybe PMD-mapped */
1899 					? SCAN_PTE_MAPPED_HUGEPAGE
1900 					: SCAN_PAGE_COMPOUND;
1901 			goto out_unlock;
1902 		}
1903 
1904 		folio = page_folio(page);
1905 
1906 		if (folio_mapping(folio) != mapping) {
1907 			result = SCAN_TRUNCATED;
1908 			goto out_unlock;
1909 		}
1910 
1911 		if (!is_shmem && (folio_test_dirty(folio) ||
1912 				  folio_test_writeback(folio))) {
1913 			/*
1914 			 * khugepaged only works on read-only fd, so this
1915 			 * page is dirty because it hasn't been flushed
1916 			 * since first write.
1917 			 */
1918 			result = SCAN_FAIL;
1919 			goto out_unlock;
1920 		}
1921 
1922 		if (!folio_isolate_lru(folio)) {
1923 			result = SCAN_DEL_PAGE_LRU;
1924 			goto out_unlock;
1925 		}
1926 
1927 		if (folio_has_private(folio) &&
1928 		    !filemap_release_folio(folio, GFP_KERNEL)) {
1929 			result = SCAN_PAGE_HAS_PRIVATE;
1930 			folio_putback_lru(folio);
1931 			goto out_unlock;
1932 		}
1933 
1934 		if (folio_mapped(folio))
1935 			try_to_unmap(folio,
1936 					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1937 
1938 		xas_lock_irq(&xas);
1939 		xas_set(&xas, index);
1940 
1941 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1942 
1943 		/*
1944 		 * The page is expected to have page_count() == 3:
1945 		 *  - we hold a pin on it;
1946 		 *  - one reference from page cache;
1947 		 *  - one from isolate_lru_page;
1948 		 */
1949 		if (!page_ref_freeze(page, 3)) {
1950 			result = SCAN_PAGE_COUNT;
1951 			xas_unlock_irq(&xas);
1952 			putback_lru_page(page);
1953 			goto out_unlock;
1954 		}
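		/*
		 * For reference, page_ref_freeze(page, 3) above is roughly
		 * (simplified sketch of the core helper):
		 *
		 *	atomic_cmpxchg(&page->_refcount, 3, 0) == 3
		 *
		 * The count drops to zero only if exactly the three expected
		 * references exist, so any concurrent pin (gup, speculative
		 * page-cache lookup) makes the freeze, and hence the
		 * collapse of this subpage, fail.
		 */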
1955 
1956 		/*
1957 		 * Add the page to the list to be able to undo the collapse if
1958 		 * something goes wrong.
1959 		 */
1960 		list_add_tail(&page->lru, &pagelist);
1961 
1962 		/* Finally, replace with the new page. */
1963 		xas_store(&xas, hpage);
1964 		continue;
1965 out_unlock:
1966 		unlock_page(page);
1967 		put_page(page);
1968 		goto xa_unlocked;
1969 	}
1970 	nr = thp_nr_pages(hpage);
1971 
1972 	if (is_shmem)
1973 		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
1974 	else {
1975 		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
1976 		filemap_nr_thps_inc(mapping);
1977 		/*
1978 		 * Paired with smp_mb() in do_dentry_open() to ensure
1979 		 * i_writecount is up to date and the update to nr_thps is
1980 		 * visible. Ensures the page cache will be truncated if the
1981 		 * file is opened writable.
1982 		 */
1983 		smp_mb();
1984 		if (inode_is_open_for_write(mapping->host)) {
1985 			result = SCAN_FAIL;
1986 			__mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
1987 			filemap_nr_thps_dec(mapping);
1988 			goto xa_locked;
1989 		}
1990 	}
1991 
1992 	if (nr_none) {
1993 		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
1994 		/* nr_none is always 0 for non-shmem. */
1995 		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
1996 	}
1997 
1998 	/* Join all the small entries into a single multi-index entry */
1999 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2000 	xas_store(&xas, hpage);
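	/*
	 * After xas_set_order(&xas, start, HPAGE_PMD_ORDER), the single
	 * store above installs hpage as one multi-index entry spanning all
	 * HPAGE_PMD_NR slots in [start, start + HPAGE_PMD_NR), so any
	 * lookup within the range now resolves to the huge page.
	 */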
2001 xa_locked:
2002 	xas_unlock_irq(&xas);
2003 xa_unlocked:
2004 
2005 	/*
2006 	 * If collapse is successful, flush must be done now before copying.
2007 	 * If collapse is unsuccessful, does flush actually need to be done?
2008 	 * Do it anyway, to clear the state.
2009 	 */
2010 	try_to_unmap_flush();
2011 
2012 	if (result == SCAN_SUCCEED) {
2013 		struct page *page, *tmp;
2014 		struct folio *folio;
2015 
2016 		/*
2017 		 * Replacing the old pages with the new one has succeeded; now
2018 		 * we need to copy the content and free the old pages.
2019 		 */
2020 		index = start;
2021 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2022 			while (index < page->index) {
2023 				clear_highpage(hpage + (index % HPAGE_PMD_NR));
2024 				index++;
2025 			}
2026 			copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
2027 				      page);
2028 			list_del(&page->lru);
2029 			page->mapping = NULL;
2030 			page_ref_unfreeze(page, 1);
2031 			ClearPageActive(page);
2032 			ClearPageUnevictable(page);
2033 			unlock_page(page);
2034 			put_page(page);
2035 			index++;
2036 		}
2037 		while (index < end) {
2038 			clear_highpage(hpage + (index % HPAGE_PMD_NR));
2039 			index++;
2040 		}
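		/*
		 * Worked example of the subpage arithmetic above (assuming
		 * x86-64 with HPAGE_PMD_NR == 512): for start == 0x200, the
		 * small page at index 0x203 is copied into subpage
		 * 0x203 % 512 == 3 of hpage, while indices that were never
		 * present are zero-filled via clear_highpage().
		 */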
2041 
2042 		folio = page_folio(hpage);
2043 		folio_mark_uptodate(folio);
2044 		folio_ref_add(folio, HPAGE_PMD_NR - 1);
2045 
2046 		if (is_shmem)
2047 			folio_mark_dirty(folio);
2048 		folio_add_lru(folio);
2049 
2050 		/*
2051 		 * Remove pte page tables, so we can re-fault the page as huge.
2052 		 */
2053 		result = retract_page_tables(mapping, start, mm, addr, hpage,
2054 					     cc);
2055 		unlock_page(hpage);
2056 		hpage = NULL;
2057 	} else {
2058 		struct page *page;
2059 
2060 		/* Something went wrong: roll back page cache changes */
2061 		xas_lock_irq(&xas);
2062 		if (nr_none) {
2063 			mapping->nrpages -= nr_none;
2064 			shmem_uncharge(mapping->host, nr_none);
2065 		}
2066 
2067 		xas_set(&xas, start);
2068 		xas_for_each(&xas, page, end - 1) {
2069 			page = list_first_entry_or_null(&pagelist,
2070 					struct page, lru);
2071 			if (!page || xas.xa_index < page->index) {
2072 				if (!nr_none)
2073 					break;
2074 				nr_none--;
2075 				/* Put holes back where they were */
2076 				xas_store(&xas, NULL);
2077 				continue;
2078 			}
2079 
2080 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2081 
2082 			/* Unfreeze the page. */
2083 			list_del(&page->lru);
2084 			page_ref_unfreeze(page, 2);
2085 			xas_store(&xas, page);
2086 			xas_pause(&xas);
2087 			xas_unlock_irq(&xas);
2088 			unlock_page(page);
2089 			putback_lru_page(page);
2090 			xas_lock_irq(&xas);
2091 		}
2092 		VM_BUG_ON(nr_none);
2093 		xas_unlock_irq(&xas);
2094 
2095 		hpage->mapping = NULL;
2096 	}
2097 
2098 	if (hpage)
2099 		unlock_page(hpage);
2100 out:
2101 	VM_BUG_ON(!list_empty(&pagelist));
2102 	if (hpage) {
2103 		mem_cgroup_uncharge(page_folio(hpage));
2104 		put_page(hpage);
2105 	}
2106 
2107 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
2108 	return result;
2109 }
2110 
2111 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2112 				    struct file *file, pgoff_t start,
2113 				    struct collapse_control *cc)
2114 {
2115 	struct page *page = NULL;
2116 	struct address_space *mapping = file->f_mapping;
2117 	XA_STATE(xas, &mapping->i_pages, start);
2118 	int present, swap;
2119 	int node = NUMA_NO_NODE;
2120 	int result = SCAN_SUCCEED;
2121 
2122 	present = 0;
2123 	swap = 0;
2124 	memset(cc->node_load, 0, sizeof(cc->node_load));
2125 	nodes_clear(cc->alloc_nmask);
2126 	rcu_read_lock();
2127 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2128 		if (xas_retry(&xas, page))
2129 			continue;
2130 
2131 		if (xa_is_value(page)) {
2132 			++swap;
2133 			if (cc->is_khugepaged &&
2134 			    swap > khugepaged_max_ptes_swap) {
2135 				result = SCAN_EXCEED_SWAP_PTE;
2136 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2137 				break;
2138 			}
2139 			continue;
2140 		}
2141 
2142 		/*
2143 		 * TODO: khugepaged should compact smaller compound pages
2144 		 * into a PMD sized page
2145 		 */
2146 		if (PageTransCompound(page)) {
2147 			struct page *head = compound_head(page);
2148 
2149 			result = compound_order(head) == HPAGE_PMD_ORDER &&
2150 					head->index == start
2151 					/* Maybe PMD-mapped */
2152 					? SCAN_PTE_MAPPED_HUGEPAGE
2153 					: SCAN_PAGE_COMPOUND;
2154 			/*
2155 			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2156 			 * by the caller won't touch the page cache, and so
2157 			 * it's safe to skip LRU and refcount checks before
2158 			 * returning.
2159 			 */
2160 			break;
2161 		}
2162 
2163 		node = page_to_nid(page);
2164 		if (hpage_collapse_scan_abort(node, cc)) {
2165 			result = SCAN_SCAN_ABORT;
2166 			break;
2167 		}
2168 		cc->node_load[node]++;
2169 
2170 		if (!PageLRU(page)) {
2171 			result = SCAN_PAGE_LRU;
2172 			break;
2173 		}
2174 
2175 		if (page_count(page) !=
2176 		    1 + page_mapcount(page) + page_has_private(page)) {
2177 			result = SCAN_PAGE_COUNT;
2178 			break;
2179 		}
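		/*
		 * Example of the expected-refcount check above: a clean
		 * page-cache page with no private data, mapped by two
		 * processes, has page_count() == 3 (one page-cache reference
		 * plus two mapping references) and page_mapcount() == 2, so
		 * it passes; any extra pin such as a gup reference fails the
		 * check.
		 */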
2180 
2181 		/*
2182 		 * We probably should check if the page is referenced here, but
2183 		 * nobody would transfer pte_young() to PageReferenced() for us.
2184 		 * And rmap walk here is just too costly...
2185 		 */
2186 
2187 		present++;
2188 
2189 		if (need_resched()) {
2190 			xas_pause(&xas);
2191 			cond_resched_rcu();
2192 		}
2193 	}
2194 	rcu_read_unlock();
2195 
2196 	if (result == SCAN_SUCCEED) {
2197 		if (cc->is_khugepaged &&
2198 		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2199 			result = SCAN_EXCEED_NONE_PTE;
2200 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2201 		} else {
2202 			result = collapse_file(mm, addr, file, start, cc);
2203 		}
2204 	}
2205 
2206 	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
2207 	return result;
2208 }
2209 #else
2210 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2211 				    struct file *file, pgoff_t start,
2212 				    struct collapse_control *cc)
2213 {
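	/*
	 * Callers guard the file path with IS_ENABLED(CONFIG_SHMEM), so
	 * this stub is never reachable; BUILD_BUG() turns any new unguarded
	 * caller into a build-time failure.
	 */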
2214 	BUILD_BUG();
2215 }
2216 
2217 static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
2218 {
2219 }
2220 
2221 static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
2222 					  unsigned long addr)
2223 {
2224 	return false;
2225 }
2226 #endif
2227 
2228 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2229 					    struct collapse_control *cc)
2230 	__releases(&khugepaged_mm_lock)
2231 	__acquires(&khugepaged_mm_lock)
2232 {
2233 	struct vma_iterator vmi;
2234 	struct khugepaged_mm_slot *mm_slot;
2235 	struct mm_slot *slot;
2236 	struct mm_struct *mm;
2237 	struct vm_area_struct *vma;
2238 	int progress = 0;
2239 
2240 	VM_BUG_ON(!pages);
2241 	lockdep_assert_held(&khugepaged_mm_lock);
2242 	*result = SCAN_FAIL;
2243 
2244 	if (khugepaged_scan.mm_slot) {
2245 		mm_slot = khugepaged_scan.mm_slot;
2246 		slot = &mm_slot->slot;
2247 	} else {
2248 		slot = list_entry(khugepaged_scan.mm_head.next,
2249 				     struct mm_slot, mm_node);
2250 		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2251 		khugepaged_scan.address = 0;
2252 		khugepaged_scan.mm_slot = mm_slot;
2253 	}
2254 	spin_unlock(&khugepaged_mm_lock);
2255 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2256 
2257 	mm = slot->mm;
2258 	/*
2259 	 * Don't wait for semaphore (to avoid long wait times).  Just move to
2260 	 * the next mm on the list.
2261 	 */
2262 	vma = NULL;
2263 	if (unlikely(!mmap_read_trylock(mm)))
2264 		goto breakouterloop_mmap_lock;
2265 
2266 	progress++;
2267 	if (unlikely(hpage_collapse_test_exit(mm)))
2268 		goto breakouterloop;
2269 
2270 	vma_iter_init(&vmi, mm, khugepaged_scan.address);
2271 	for_each_vma(vmi, vma) {
2272 		unsigned long hstart, hend;
2273 
2274 		cond_resched();
2275 		if (unlikely(hpage_collapse_test_exit(mm))) {
2276 			progress++;
2277 			break;
2278 		}
2279 		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2280 skip:
2281 			progress++;
2282 			continue;
2283 		}
2284 		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2285 		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2286 		if (khugepaged_scan.address > hend)
2287 			goto skip;
2288 		if (khugepaged_scan.address < hstart)
2289 			khugepaged_scan.address = hstart;
2290 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2291 
2292 		while (khugepaged_scan.address < hend) {
2293 			bool mmap_locked = true;
2294 
2295 			cond_resched();
2296 			if (unlikely(hpage_collapse_test_exit(mm)))
2297 				goto breakouterloop;
2298 
2299 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2300 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2301 				  hend);
2302 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2303 				struct file *file = get_file(vma->vm_file);
2304 				pgoff_t pgoff = linear_page_index(vma,
2305 						khugepaged_scan.address);
2306 
2307 				mmap_read_unlock(mm);
2308 				*result = hpage_collapse_scan_file(mm,
2309 								   khugepaged_scan.address,
2310 								   file, pgoff, cc);
2311 				mmap_locked = false;
2312 				fput(file);
2313 			} else {
2314 				*result = hpage_collapse_scan_pmd(mm, vma,
2315 								  khugepaged_scan.address,
2316 								  &mmap_locked,
2317 								  cc);
2318 			}
2319 			switch (*result) {
2320 			case SCAN_PTE_MAPPED_HUGEPAGE: {
2321 				pmd_t *pmd;
2322 
2323 				*result = find_pmd_or_thp_or_none(mm,
2324 								  khugepaged_scan.address,
2325 								  &pmd);
2326 				if (*result != SCAN_SUCCEED)
2327 					break;
2328 				if (!khugepaged_add_pte_mapped_thp(mm,
2329 								   khugepaged_scan.address))
2330 					break;
2331 			} fallthrough;
2332 			case SCAN_SUCCEED:
2333 				++khugepaged_pages_collapsed;
2334 				break;
2335 			default:
2336 				break;
2337 			}
2338 
2339 			/* move to next address */
2340 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2341 			progress += HPAGE_PMD_NR;
2342 			if (!mmap_locked)
2343 				/*
2344 				 * We released mmap_lock, so break the loop.  Note
2345 				 * that we drop mmap_lock before all hugepage
2346 				 * allocations, so if allocation fails, we are
2347 				 * guaranteed to break here and report the
2348 				 * correct result back to caller.
2349 				 */
2350 				goto breakouterloop_mmap_lock;
2351 			if (progress >= pages)
2352 				goto breakouterloop;
2353 		}
2354 	}
2355 breakouterloop:
2356 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2357 breakouterloop_mmap_lock:
2358 
2359 	spin_lock(&khugepaged_mm_lock);
2360 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2361 	/*
2362 	 * Release the current mm_slot if this mm is about to die, or
2363 	 * if we scanned all vmas of this mm.
2364 	 */
2365 	if (hpage_collapse_test_exit(mm) || !vma) {
2366 		/*
2367 		 * Make sure that if mm_users is reaching zero while
2368 		 * khugepaged runs here, khugepaged_exit will find
2369 		 * mm_slot not pointing to the exiting mm.
2370 		 */
2371 		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2372 			slot = list_entry(slot->mm_node.next,
2373 					  struct mm_slot, mm_node);
2374 			khugepaged_scan.mm_slot =
2375 				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2376 			khugepaged_scan.address = 0;
2377 		} else {
2378 			khugepaged_scan.mm_slot = NULL;
2379 			khugepaged_full_scans++;
2380 		}
2381 
2382 		collect_mm_slot(mm_slot);
2383 	}
2384 
2385 	return progress;
2386 }
2387 
2388 static int khugepaged_has_work(void)
2389 {
2390 	return !list_empty(&khugepaged_scan.mm_head) &&
2391 		hugepage_flags_enabled();
2392 }
2393 
2394 static int khugepaged_wait_event(void)
2395 {
2396 	return !list_empty(&khugepaged_scan.mm_head) ||
2397 		kthread_should_stop();
2398 }
2399 
2400 static void khugepaged_do_scan(struct collapse_control *cc)
2401 {
2402 	unsigned int progress = 0, pass_through_head = 0;
2403 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2404 	bool wait = true;
2405 	int result = SCAN_SUCCEED;
2406 
2407 	lru_add_drain_all();
2408 
2409 	while (true) {
2410 		cond_resched();
2411 
2412 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2413 			break;
2414 
2415 		spin_lock(&khugepaged_mm_lock);
2416 		if (!khugepaged_scan.mm_slot)
2417 			pass_through_head++;
2418 		if (khugepaged_has_work() &&
2419 		    pass_through_head < 2)
2420 			progress += khugepaged_scan_mm_slot(pages - progress,
2421 							    &result, cc);
2422 		else
2423 			progress = pages;
2424 		spin_unlock(&khugepaged_mm_lock);
2425 
2426 		if (progress >= pages)
2427 			break;
2428 
2429 		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2430 			/*
2431 			 * If we fail to allocate the first time, sleep for
2432 			 * a while.  If we fail again, cancel the scan.
2433 			 */
2434 			if (!wait)
2435 				break;
2436 			wait = false;
2437 			khugepaged_alloc_sleep();
2438 		}
2439 	}
2440 }
2441 
2442 static bool khugepaged_should_wakeup(void)
2443 {
2444 	return kthread_should_stop() ||
2445 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2446 }
2447 
2448 static void khugepaged_wait_work(void)
2449 {
2450 	if (khugepaged_has_work()) {
2451 		const unsigned long scan_sleep_jiffies =
2452 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2453 
2454 		if (!scan_sleep_jiffies)
2455 			return;
2456 
2457 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2458 		wait_event_freezable_timeout(khugepaged_wait,
2459 					     khugepaged_should_wakeup(),
2460 					     scan_sleep_jiffies);
2461 		return;
2462 	}
2463 
2464 	if (hugepage_flags_enabled())
2465 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2466 }
2467 
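/*
 * The sleep and scan parameters consulted above are runtime-tunable via
 * the established THP sysfs ABI, e.g. (shell example):
 *
 *	echo 100   > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *	echo 60000 > /sys/kernel/mm/transparent_hugepage/khugepaged/alloc_sleep_millisecs
 *	echo 4096  > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 */
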
2468 static int khugepaged(void *none)
2469 {
2470 	struct khugepaged_mm_slot *mm_slot;
2471 
2472 	set_freezable();
2473 	set_user_nice(current, MAX_NICE);
2474 
2475 	while (!kthread_should_stop()) {
2476 		khugepaged_do_scan(&khugepaged_collapse_control);
2477 		khugepaged_wait_work();
2478 	}
2479 
2480 	spin_lock(&khugepaged_mm_lock);
2481 	mm_slot = khugepaged_scan.mm_slot;
2482 	khugepaged_scan.mm_slot = NULL;
2483 	if (mm_slot)
2484 		collect_mm_slot(mm_slot);
2485 	spin_unlock(&khugepaged_mm_lock);
2486 	return 0;
2487 }
2488 
2489 static void set_recommended_min_free_kbytes(void)
2490 {
2491 	struct zone *zone;
2492 	int nr_zones = 0;
2493 	unsigned long recommended_min;
2494 
2495 	if (!hugepage_flags_enabled()) {
2496 		calculate_min_free_kbytes();
2497 		goto update_wmarks;
2498 	}
2499 
2500 	for_each_populated_zone(zone) {
2501 		/*
2502 		 * We don't need to worry about fragmentation of
2503 		 * ZONE_MOVABLE since it only has movable pages.
2504 		 */
2505 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2506 			continue;
2507 
2508 		nr_zones++;
2509 	}
2510 
2511 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2512 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2513 
2514 	/*
2515 	 * Make sure that on average at least two pageblocks are almost free
2516 	 * of another type, one for a migratetype to fall back to and a
2517 	 * second to avoid subsequent fallbacks of other types.  There are 3
2518 	 * MIGRATE_TYPES we care about.
2519 	 */
2520 	recommended_min += pageblock_nr_pages * nr_zones *
2521 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2522 
2523 	/* don't ever allow reserving more than 5% of the lowmem */
2524 	recommended_min = min(recommended_min,
2525 			      (unsigned long) nr_free_buffer_pages() / 20);
2526 	recommended_min <<= (PAGE_SHIFT-10);
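	/*
	 * Worked example (assumed configuration: x86-64 with 4K pages and
	 * 2M pageblocks, so pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3,
	 * and two qualifying zones): 512 * 2 * 2 + 512 * 2 * 3 * 3 == 11264
	 * pages, shifted left by PAGE_SHIFT - 10 == 2 to give 45056 KiB,
	 * subject to the 5%-of-lowmem cap above.
	 */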
2527 
2528 	if (recommended_min > min_free_kbytes) {
2529 		if (user_min_free_kbytes >= 0)
2530 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2531 				min_free_kbytes, recommended_min);
2532 
2533 		min_free_kbytes = recommended_min;
2534 	}
2535 
2536 update_wmarks:
2537 	setup_per_zone_wmarks();
2538 }
2539 
2540 int start_stop_khugepaged(void)
2541 {
2542 	int err = 0;
2543 
2544 	mutex_lock(&khugepaged_mutex);
2545 	if (hugepage_flags_enabled()) {
2546 		if (!khugepaged_thread)
2547 			khugepaged_thread = kthread_run(khugepaged, NULL,
2548 							"khugepaged");
2549 		if (IS_ERR(khugepaged_thread)) {
2550 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2551 			err = PTR_ERR(khugepaged_thread);
2552 			khugepaged_thread = NULL;
2553 			goto fail;
2554 		}
2555 
2556 		if (!list_empty(&khugepaged_scan.mm_head))
2557 			wake_up_interruptible(&khugepaged_wait);
2558 	} else if (khugepaged_thread) {
2559 		kthread_stop(khugepaged_thread);
2560 		khugepaged_thread = NULL;
2561 	}
2562 	set_recommended_min_free_kbytes();
2563 fail:
2564 	mutex_unlock(&khugepaged_mutex);
2565 	return err;
2566 }
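
/*
 * start_stop_khugepaged() above runs when THP is toggled at runtime, for
 * example via the standard sysfs knob (shell example):
 *
 *	echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo never  > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * The first form starts the daemon if it is not already running; the
 * second stops it.
 */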
2567 
2568 void khugepaged_min_free_kbytes_update(void)
2569 {
2570 	mutex_lock(&khugepaged_mutex);
2571 	if (hugepage_flags_enabled() && khugepaged_thread)
2572 		set_recommended_min_free_kbytes();
2573 	mutex_unlock(&khugepaged_mutex);
2574 }
2575 
2576 bool current_is_khugepaged(void)
2577 {
2578 	return kthread_func(current) == khugepaged;
2579 }
2580 
2581 static int madvise_collapse_errno(enum scan_result r)
2582 {
2583 	/*
2584 	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2585 	 * actionable feedback to the caller, so it may take an appropriate
2586 	 * fallback measure depending on the nature of the failure.
2587 	 */
2588 	switch (r) {
2589 	case SCAN_ALLOC_HUGE_PAGE_FAIL:
2590 		return -ENOMEM;
2591 	case SCAN_CGROUP_CHARGE_FAIL:
2592 		return -EBUSY;
2593 	/* Resource temporarily unavailable - trying again might succeed */
2594 	case SCAN_PAGE_LOCK:
2595 	case SCAN_PAGE_LRU:
2596 	case SCAN_DEL_PAGE_LRU:
2597 		return -EAGAIN;
2598 	/*
2599 	 * Other: trying again is unlikely to succeed / the error is intrinsic
2600 	 * to the specified memory range.  khugepaged likely won't be able to
2601 	 * collapse it either.
2602 	 */
2603 	default:
2604 		return -EINVAL;
2605 	}
2606 }
2607 
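/*
 * Userspace usage sketch for madvise_collapse() below (illustrative only;
 * MADV_COLLAPSE is the real flag, the buffer setup is an assumption):
 *
 *	size_t len = 4UL << 20;
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(buf, 1, len);
 *	if (madvise(buf, len, MADV_COLLAPSE))
 *		perror("MADV_COLLAPSE");
 *
 * A 4M mapping guarantees at least one fully-covered, PMD-aligned 2M
 * range.  Unlike most madvise(2) hints, failure is actionable; see the
 * errno mapping above (EAGAIN in particular is worth retrying).
 */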
2608 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2609 		     unsigned long start, unsigned long end)
2610 {
2611 	struct collapse_control *cc;
2612 	struct mm_struct *mm = vma->vm_mm;
2613 	unsigned long hstart, hend, addr;
2614 	int thps = 0, last_fail = SCAN_FAIL;
2615 	bool mmap_locked = true;
2616 
2617 	BUG_ON(vma->vm_start > start);
2618 	BUG_ON(vma->vm_end < end);
2619 
2620 	*prev = vma;
2621 
2622 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
2623 		return -EINVAL;
2624 
2625 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2626 	if (!cc)
2627 		return -ENOMEM;
2628 	cc->is_khugepaged = false;
2629 
2630 	mmgrab(mm);
2631 	lru_add_drain_all();
2632 
2633 	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2634 	hend = end & HPAGE_PMD_MASK;
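	/*
	 * The masking above rounds inward to PMD-aligned bounds.  Worked
	 * example with 2M huge pages: start == 0x1000, end == 0x401000
	 * yields hstart == 0x200000 and hend == 0x400000, so only the one
	 * fully-covered 2M range is attempted.
	 */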
2635 
2636 	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2637 		int result = SCAN_FAIL;
2638 
2639 		if (!mmap_locked) {
2640 			cond_resched();
2641 			mmap_read_lock(mm);
2642 			mmap_locked = true;
2643 			result = hugepage_vma_revalidate(mm, addr, false, &vma,
2644 							 cc);
2645 			if (result != SCAN_SUCCEED) {
2646 				last_fail = result;
2647 				goto out_nolock;
2648 			}
2649 
2650 			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2651 		}
2652 		mmap_assert_locked(mm);
2653 		memset(cc->node_load, 0, sizeof(cc->node_load));
2654 		nodes_clear(cc->alloc_nmask);
2655 		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2656 			struct file *file = get_file(vma->vm_file);
2657 			pgoff_t pgoff = linear_page_index(vma, addr);
2658 
2659 			mmap_read_unlock(mm);
2660 			mmap_locked = false;
2661 			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2662 							  cc);
2663 			fput(file);
2664 		} else {
2665 			result = hpage_collapse_scan_pmd(mm, vma, addr,
2666 							 &mmap_locked, cc);
2667 		}
2668 		if (!mmap_locked)
2669 			*prev = NULL;  /* Tell caller we dropped mmap_lock */
2670 
2671 handle_result:
2672 		switch (result) {
2673 		case SCAN_SUCCEED:
2674 		case SCAN_PMD_MAPPED:
2675 			++thps;
2676 			break;
2677 		case SCAN_PTE_MAPPED_HUGEPAGE:
2678 			BUG_ON(mmap_locked);
2679 			BUG_ON(*prev);
2680 			mmap_write_lock(mm);
2681 			result = collapse_pte_mapped_thp(mm, addr, true);
2682 			mmap_write_unlock(mm);
2683 			goto handle_result;
2684 		/* Whitelisted set of results where continuing is OK */
2685 		case SCAN_PMD_NULL:
2686 		case SCAN_PTE_NON_PRESENT:
2687 		case SCAN_PTE_UFFD_WP:
2688 		case SCAN_PAGE_RO:
2689 		case SCAN_LACK_REFERENCED_PAGE:
2690 		case SCAN_PAGE_NULL:
2691 		case SCAN_PAGE_COUNT:
2692 		case SCAN_PAGE_LOCK:
2693 		case SCAN_PAGE_COMPOUND:
2694 		case SCAN_PAGE_LRU:
2695 		case SCAN_DEL_PAGE_LRU:
2696 			last_fail = result;
2697 			break;
2698 		default:
2699 			last_fail = result;
2700 			/* Other error, exit */
2701 			goto out_maybelock;
2702 		}
2703 	}
2704 
2705 out_maybelock:
2706 	/* Caller expects us to hold mmap_lock on return */
2707 	if (!mmap_locked)
2708 		mmap_read_lock(mm);
2709 out_nolock:
2710 	mmap_assert_locked(mm);
2711 	mmdrop(mm);
2712 	kfree(cc);
2713 
2714 	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2715 			: madvise_collapse_errno(last_fail);
2716 }
2717