xref: /linux/mm/khugepaged.c (revision f528260b1a7d52140dfeb58857e13fc98ac193ef)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2b46e756fSKirill A. Shutemov #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3b46e756fSKirill A. Shutemov 
4b46e756fSKirill A. Shutemov #include <linux/mm.h>
5b46e756fSKirill A. Shutemov #include <linux/sched.h>
66e84f315SIngo Molnar #include <linux/sched/mm.h>
7f7ccbae4SIngo Molnar #include <linux/sched/coredump.h>
8b46e756fSKirill A. Shutemov #include <linux/mmu_notifier.h>
9b46e756fSKirill A. Shutemov #include <linux/rmap.h>
10b46e756fSKirill A. Shutemov #include <linux/swap.h>
11b46e756fSKirill A. Shutemov #include <linux/mm_inline.h>
12b46e756fSKirill A. Shutemov #include <linux/kthread.h>
13b46e756fSKirill A. Shutemov #include <linux/khugepaged.h>
14b46e756fSKirill A. Shutemov #include <linux/freezer.h>
15b46e756fSKirill A. Shutemov #include <linux/mman.h>
16b46e756fSKirill A. Shutemov #include <linux/hashtable.h>
17b46e756fSKirill A. Shutemov #include <linux/userfaultfd_k.h>
18b46e756fSKirill A. Shutemov #include <linux/page_idle.h>
1980110bbfSPasha Tatashin #include <linux/page_table_check.h>
20b46e756fSKirill A. Shutemov #include <linux/swapops.h>
21f3f0e1d2SKirill A. Shutemov #include <linux/shmem_fs.h>
22b46e756fSKirill A. Shutemov 
23b46e756fSKirill A. Shutemov #include <asm/tlb.h>
24b46e756fSKirill A. Shutemov #include <asm/pgalloc.h>
25b46e756fSKirill A. Shutemov #include "internal.h"
26b26e2701SQi Zheng #include "mm_slot.h"
27b46e756fSKirill A. Shutemov 
28b46e756fSKirill A. Shutemov enum scan_result {
29b46e756fSKirill A. Shutemov 	SCAN_FAIL,
30b46e756fSKirill A. Shutemov 	SCAN_SUCCEED,
31b46e756fSKirill A. Shutemov 	SCAN_PMD_NULL,
3234488399SZach O'Keefe 	SCAN_PMD_NONE,
3350722804SZach O'Keefe 	SCAN_PMD_MAPPED,
34b46e756fSKirill A. Shutemov 	SCAN_EXCEED_NONE_PTE,
3571a2c112SKirill A. Shutemov 	SCAN_EXCEED_SWAP_PTE,
3671a2c112SKirill A. Shutemov 	SCAN_EXCEED_SHARED_PTE,
37b46e756fSKirill A. Shutemov 	SCAN_PTE_NON_PRESENT,
38e1e267c7SPeter Xu 	SCAN_PTE_UFFD_WP,
3958ac9a89SZach O'Keefe 	SCAN_PTE_MAPPED_HUGEPAGE,
40b46e756fSKirill A. Shutemov 	SCAN_PAGE_RO,
410db501f7SEbru Akagunduz 	SCAN_LACK_REFERENCED_PAGE,
42b46e756fSKirill A. Shutemov 	SCAN_PAGE_NULL,
43b46e756fSKirill A. Shutemov 	SCAN_SCAN_ABORT,
44b46e756fSKirill A. Shutemov 	SCAN_PAGE_COUNT,
45b46e756fSKirill A. Shutemov 	SCAN_PAGE_LRU,
46b46e756fSKirill A. Shutemov 	SCAN_PAGE_LOCK,
47b46e756fSKirill A. Shutemov 	SCAN_PAGE_ANON,
48b46e756fSKirill A. Shutemov 	SCAN_PAGE_COMPOUND,
49b46e756fSKirill A. Shutemov 	SCAN_ANY_PROCESS,
50b46e756fSKirill A. Shutemov 	SCAN_VMA_NULL,
51b46e756fSKirill A. Shutemov 	SCAN_VMA_CHECK,
52b46e756fSKirill A. Shutemov 	SCAN_ADDRESS_RANGE,
53b46e756fSKirill A. Shutemov 	SCAN_DEL_PAGE_LRU,
54b46e756fSKirill A. Shutemov 	SCAN_ALLOC_HUGE_PAGE_FAIL,
55b46e756fSKirill A. Shutemov 	SCAN_CGROUP_CHARGE_FAIL,
56f3f0e1d2SKirill A. Shutemov 	SCAN_TRUNCATED,
5799cb0dbdSSong Liu 	SCAN_PAGE_HAS_PRIVATE,
58b46e756fSKirill A. Shutemov };
59b46e756fSKirill A. Shutemov 
60b46e756fSKirill A. Shutemov #define CREATE_TRACE_POINTS
61b46e756fSKirill A. Shutemov #include <trace/events/huge_memory.h>
62b46e756fSKirill A. Shutemov 
634aab2be0SVijay Balakrishna static struct task_struct *khugepaged_thread __read_mostly;
644aab2be0SVijay Balakrishna static DEFINE_MUTEX(khugepaged_mutex);
654aab2be0SVijay Balakrishna 
66b46e756fSKirill A. Shutemov /* default scan 8*512 ptes (or vmas) every 10 seconds */
67b46e756fSKirill A. Shutemov static unsigned int khugepaged_pages_to_scan __read_mostly;
68b46e756fSKirill A. Shutemov static unsigned int khugepaged_pages_collapsed;
69b46e756fSKirill A. Shutemov static unsigned int khugepaged_full_scans;
70b46e756fSKirill A. Shutemov static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
71b46e756fSKirill A. Shutemov /* during fragmentation, poll the hugepage allocator once every minute */
72b46e756fSKirill A. Shutemov static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
73b46e756fSKirill A. Shutemov static unsigned long khugepaged_sleep_expire;
74b46e756fSKirill A. Shutemov static DEFINE_SPINLOCK(khugepaged_mm_lock);
75b46e756fSKirill A. Shutemov static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
76b46e756fSKirill A. Shutemov /*
77b46e756fSKirill A. Shutemov  * By default, collapse hugepages if there is at least one pte mapped
78b46e756fSKirill A. Shutemov  * as it would have been mapped had the vma been large enough at
79b46e756fSKirill A. Shutemov  * page-fault time.
80d8ea7cc8SZach O'Keefe  *
81d8ea7cc8SZach O'Keefe  * Note that these are only respected if collapse was initiated by khugepaged.
82b46e756fSKirill A. Shutemov  */
83b46e756fSKirill A. Shutemov static unsigned int khugepaged_max_ptes_none __read_mostly;
84b46e756fSKirill A. Shutemov static unsigned int khugepaged_max_ptes_swap __read_mostly;
8571a2c112SKirill A. Shutemov static unsigned int khugepaged_max_ptes_shared __read_mostly;
86b46e756fSKirill A. Shutemov 
87b46e756fSKirill A. Shutemov #define MM_SLOTS_HASH_BITS 10
88b46e756fSKirill A. Shutemov static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
89b46e756fSKirill A. Shutemov 
90b46e756fSKirill A. Shutemov static struct kmem_cache *mm_slot_cache __read_mostly;
91b46e756fSKirill A. Shutemov 
9227e1f827SSong Liu #define MAX_PTE_MAPPED_THP 8
9327e1f827SSong Liu 
9434d6b470SZach O'Keefe struct collapse_control {
95d8ea7cc8SZach O'Keefe 	bool is_khugepaged;
96d8ea7cc8SZach O'Keefe 
9734d6b470SZach O'Keefe 	/* Num pages scanned per node */
9834d6b470SZach O'Keefe 	u32 node_load[MAX_NUMNODES];
9934d6b470SZach O'Keefe 
100e031ff96SYang Shi 	/* nodemask for allocation fallback */
101e031ff96SYang Shi 	nodemask_t alloc_nmask;
10234d6b470SZach O'Keefe };
10334d6b470SZach O'Keefe 
104b46e756fSKirill A. Shutemov /**
105b26e2701SQi Zheng  * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
106b26e2701SQi Zheng  * @slot: hash lookup from mm to mm_slot
107336e6b53SAlex Shi  * @nr_pte_mapped_thp: number of pte-mapped THPs
108336e6b53SAlex Shi  * @pte_mapped_thp: array of addresses of the corresponding pte-mapped THPs
109b46e756fSKirill A. Shutemov  */
110b26e2701SQi Zheng struct khugepaged_mm_slot {
111b26e2701SQi Zheng 	struct mm_slot slot;
11227e1f827SSong Liu 
11327e1f827SSong Liu 	/* pte-mapped THP in this mm */
11427e1f827SSong Liu 	int nr_pte_mapped_thp;
11527e1f827SSong Liu 	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
116b46e756fSKirill A. Shutemov };
117b46e756fSKirill A. Shutemov 
118b46e756fSKirill A. Shutemov /**
119b46e756fSKirill A. Shutemov  * struct khugepaged_scan - cursor for scanning
120b46e756fSKirill A. Shutemov  * @mm_head: the head of the mm list to scan
121b46e756fSKirill A. Shutemov  * @mm_slot: the current mm_slot we are scanning
122b46e756fSKirill A. Shutemov  * @address: the next address inside that mm_slot to be scanned
123b46e756fSKirill A. Shutemov  *
124b46e756fSKirill A. Shutemov  * There is only the one khugepaged_scan instance of this cursor structure.
125b46e756fSKirill A. Shutemov  */
126b46e756fSKirill A. Shutemov struct khugepaged_scan {
127b46e756fSKirill A. Shutemov 	struct list_head mm_head;
128b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
129b46e756fSKirill A. Shutemov 	unsigned long address;
130b46e756fSKirill A. Shutemov };
131b46e756fSKirill A. Shutemov 
132b46e756fSKirill A. Shutemov static struct khugepaged_scan khugepaged_scan = {
133b46e756fSKirill A. Shutemov 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
134b46e756fSKirill A. Shutemov };
135b46e756fSKirill A. Shutemov 
136e1465d12SJérémy Lefaure #ifdef CONFIG_SYSFS
137b46e756fSKirill A. Shutemov static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
138b46e756fSKirill A. Shutemov 					 struct kobj_attribute *attr,
139b46e756fSKirill A. Shutemov 					 char *buf)
140b46e756fSKirill A. Shutemov {
141ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
142b46e756fSKirill A. Shutemov }
143b46e756fSKirill A. Shutemov 
144b46e756fSKirill A. Shutemov static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
145b46e756fSKirill A. Shutemov 					  struct kobj_attribute *attr,
146b46e756fSKirill A. Shutemov 					  const char *buf, size_t count)
147b46e756fSKirill A. Shutemov {
148dfefd226SAlexey Dobriyan 	unsigned int msecs;
149b46e756fSKirill A. Shutemov 	int err;
150b46e756fSKirill A. Shutemov 
151dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &msecs);
152dfefd226SAlexey Dobriyan 	if (err)
153b46e756fSKirill A. Shutemov 		return -EINVAL;
154b46e756fSKirill A. Shutemov 
155b46e756fSKirill A. Shutemov 	khugepaged_scan_sleep_millisecs = msecs;
156b46e756fSKirill A. Shutemov 	khugepaged_sleep_expire = 0;
157b46e756fSKirill A. Shutemov 	wake_up_interruptible(&khugepaged_wait);
158b46e756fSKirill A. Shutemov 
159b46e756fSKirill A. Shutemov 	return count;
160b46e756fSKirill A. Shutemov }
161b46e756fSKirill A. Shutemov static struct kobj_attribute scan_sleep_millisecs_attr =
1626dcdc94dSMiaohe Lin 	__ATTR_RW(scan_sleep_millisecs);
163b46e756fSKirill A. Shutemov 
164b46e756fSKirill A. Shutemov static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
165b46e756fSKirill A. Shutemov 					  struct kobj_attribute *attr,
166b46e756fSKirill A. Shutemov 					  char *buf)
167b46e756fSKirill A. Shutemov {
168ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
169b46e756fSKirill A. Shutemov }
170b46e756fSKirill A. Shutemov 
171b46e756fSKirill A. Shutemov static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
172b46e756fSKirill A. Shutemov 					   struct kobj_attribute *attr,
173b46e756fSKirill A. Shutemov 					   const char *buf, size_t count)
174b46e756fSKirill A. Shutemov {
175dfefd226SAlexey Dobriyan 	unsigned int msecs;
176b46e756fSKirill A. Shutemov 	int err;
177b46e756fSKirill A. Shutemov 
178dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &msecs);
179dfefd226SAlexey Dobriyan 	if (err)
180b46e756fSKirill A. Shutemov 		return -EINVAL;
181b46e756fSKirill A. Shutemov 
182b46e756fSKirill A. Shutemov 	khugepaged_alloc_sleep_millisecs = msecs;
183b46e756fSKirill A. Shutemov 	khugepaged_sleep_expire = 0;
184b46e756fSKirill A. Shutemov 	wake_up_interruptible(&khugepaged_wait);
185b46e756fSKirill A. Shutemov 
186b46e756fSKirill A. Shutemov 	return count;
187b46e756fSKirill A. Shutemov }
188b46e756fSKirill A. Shutemov static struct kobj_attribute alloc_sleep_millisecs_attr =
1896dcdc94dSMiaohe Lin 	__ATTR_RW(alloc_sleep_millisecs);
190b46e756fSKirill A. Shutemov 
191b46e756fSKirill A. Shutemov static ssize_t pages_to_scan_show(struct kobject *kobj,
192b46e756fSKirill A. Shutemov 				  struct kobj_attribute *attr,
193b46e756fSKirill A. Shutemov 				  char *buf)
194b46e756fSKirill A. Shutemov {
195ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
196b46e756fSKirill A. Shutemov }
197b46e756fSKirill A. Shutemov static ssize_t pages_to_scan_store(struct kobject *kobj,
198b46e756fSKirill A. Shutemov 				   struct kobj_attribute *attr,
199b46e756fSKirill A. Shutemov 				   const char *buf, size_t count)
200b46e756fSKirill A. Shutemov {
201dfefd226SAlexey Dobriyan 	unsigned int pages;
202b46e756fSKirill A. Shutemov 	int err;
203b46e756fSKirill A. Shutemov 
204dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &pages);
205dfefd226SAlexey Dobriyan 	if (err || !pages)
206b46e756fSKirill A. Shutemov 		return -EINVAL;
207b46e756fSKirill A. Shutemov 
208b46e756fSKirill A. Shutemov 	khugepaged_pages_to_scan = pages;
209b46e756fSKirill A. Shutemov 
210b46e756fSKirill A. Shutemov 	return count;
211b46e756fSKirill A. Shutemov }
212b46e756fSKirill A. Shutemov static struct kobj_attribute pages_to_scan_attr =
2136dcdc94dSMiaohe Lin 	__ATTR_RW(pages_to_scan);
214b46e756fSKirill A. Shutemov 
215b46e756fSKirill A. Shutemov static ssize_t pages_collapsed_show(struct kobject *kobj,
216b46e756fSKirill A. Shutemov 				    struct kobj_attribute *attr,
217b46e756fSKirill A. Shutemov 				    char *buf)
218b46e756fSKirill A. Shutemov {
219ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
220b46e756fSKirill A. Shutemov }
221b46e756fSKirill A. Shutemov static struct kobj_attribute pages_collapsed_attr =
222b46e756fSKirill A. Shutemov 	__ATTR_RO(pages_collapsed);
223b46e756fSKirill A. Shutemov 
224b46e756fSKirill A. Shutemov static ssize_t full_scans_show(struct kobject *kobj,
225b46e756fSKirill A. Shutemov 			       struct kobj_attribute *attr,
226b46e756fSKirill A. Shutemov 			       char *buf)
227b46e756fSKirill A. Shutemov {
228ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
229b46e756fSKirill A. Shutemov }
230b46e756fSKirill A. Shutemov static struct kobj_attribute full_scans_attr =
231b46e756fSKirill A. Shutemov 	__ATTR_RO(full_scans);
232b46e756fSKirill A. Shutemov 
2336dcdc94dSMiaohe Lin static ssize_t defrag_show(struct kobject *kobj,
234b46e756fSKirill A. Shutemov 			   struct kobj_attribute *attr, char *buf)
235b46e756fSKirill A. Shutemov {
236b46e756fSKirill A. Shutemov 	return single_hugepage_flag_show(kobj, attr, buf,
237b46e756fSKirill A. Shutemov 					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
238b46e756fSKirill A. Shutemov }
2396dcdc94dSMiaohe Lin static ssize_t defrag_store(struct kobject *kobj,
240b46e756fSKirill A. Shutemov 			    struct kobj_attribute *attr,
241b46e756fSKirill A. Shutemov 			    const char *buf, size_t count)
242b46e756fSKirill A. Shutemov {
243b46e756fSKirill A. Shutemov 	return single_hugepage_flag_store(kobj, attr, buf, count,
244b46e756fSKirill A. Shutemov 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
245b46e756fSKirill A. Shutemov }
246b46e756fSKirill A. Shutemov static struct kobj_attribute khugepaged_defrag_attr =
2476dcdc94dSMiaohe Lin 	__ATTR_RW(defrag);
248b46e756fSKirill A. Shutemov 
249b46e756fSKirill A. Shutemov /*
250b46e756fSKirill A. Shutemov  * max_ptes_none controls whether khugepaged should collapse hugepages
251b46e756fSKirill A. Shutemov  * over unmapped ptes, in turn potentially increasing the memory
252b46e756fSKirill A. Shutemov  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
253b46e756fSKirill A. Shutemov  * reduce the available free memory in the system as it
254b46e756fSKirill A. Shutemov  * runs. Increasing max_ptes_none instead potentially reduces the
255b46e756fSKirill A. Shutemov  * free memory in the system while the khugepaged scan runs.
256b46e756fSKirill A. Shutemov  */
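
/*
 * Illustrative example (not part of the original source): with the default
 * max_ptes_none = HPAGE_PMD_NR - 1 (511 on x86-64 with 4K pages, set in
 * khugepaged_init()), a PMD range containing a single present pte can still
 * be collapsed, allocating up to 511 new zero-filled subpages. The tunable
 * is exposed through the attribute group below, so for example
 *
 *   echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *
 * restricts khugepaged to collapsing only fully populated ranges.
 */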
2576dcdc94dSMiaohe Lin static ssize_t max_ptes_none_show(struct kobject *kobj,
258b46e756fSKirill A. Shutemov 				  struct kobj_attribute *attr,
259b46e756fSKirill A. Shutemov 				  char *buf)
260b46e756fSKirill A. Shutemov {
261ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
262b46e756fSKirill A. Shutemov }
2636dcdc94dSMiaohe Lin static ssize_t max_ptes_none_store(struct kobject *kobj,
264b46e756fSKirill A. Shutemov 				   struct kobj_attribute *attr,
265b46e756fSKirill A. Shutemov 				   const char *buf, size_t count)
266b46e756fSKirill A. Shutemov {
267b46e756fSKirill A. Shutemov 	int err;
268b46e756fSKirill A. Shutemov 	unsigned long max_ptes_none;
269b46e756fSKirill A. Shutemov 
270b46e756fSKirill A. Shutemov 	err = kstrtoul(buf, 10, &max_ptes_none);
271b46e756fSKirill A. Shutemov 	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
272b46e756fSKirill A. Shutemov 		return -EINVAL;
273b46e756fSKirill A. Shutemov 
274b46e756fSKirill A. Shutemov 	khugepaged_max_ptes_none = max_ptes_none;
275b46e756fSKirill A. Shutemov 
276b46e756fSKirill A. Shutemov 	return count;
277b46e756fSKirill A. Shutemov }
278b46e756fSKirill A. Shutemov static struct kobj_attribute khugepaged_max_ptes_none_attr =
2796dcdc94dSMiaohe Lin 	__ATTR_RW(max_ptes_none);
280b46e756fSKirill A. Shutemov 
2816dcdc94dSMiaohe Lin static ssize_t max_ptes_swap_show(struct kobject *kobj,
282b46e756fSKirill A. Shutemov 				  struct kobj_attribute *attr,
283b46e756fSKirill A. Shutemov 				  char *buf)
284b46e756fSKirill A. Shutemov {
285ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
286b46e756fSKirill A. Shutemov }
287b46e756fSKirill A. Shutemov 
2886dcdc94dSMiaohe Lin static ssize_t max_ptes_swap_store(struct kobject *kobj,
289b46e756fSKirill A. Shutemov 				   struct kobj_attribute *attr,
290b46e756fSKirill A. Shutemov 				   const char *buf, size_t count)
291b46e756fSKirill A. Shutemov {
292b46e756fSKirill A. Shutemov 	int err;
293b46e756fSKirill A. Shutemov 	unsigned long max_ptes_swap;
294b46e756fSKirill A. Shutemov 
295b46e756fSKirill A. Shutemov 	err = kstrtoul(buf, 10, &max_ptes_swap);
296b46e756fSKirill A. Shutemov 	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
297b46e756fSKirill A. Shutemov 		return -EINVAL;
298b46e756fSKirill A. Shutemov 
299b46e756fSKirill A. Shutemov 	khugepaged_max_ptes_swap = max_ptes_swap;
300b46e756fSKirill A. Shutemov 
301b46e756fSKirill A. Shutemov 	return count;
302b46e756fSKirill A. Shutemov }
303b46e756fSKirill A. Shutemov 
304b46e756fSKirill A. Shutemov static struct kobj_attribute khugepaged_max_ptes_swap_attr =
3056dcdc94dSMiaohe Lin 	__ATTR_RW(max_ptes_swap);
306b46e756fSKirill A. Shutemov 
3076dcdc94dSMiaohe Lin static ssize_t max_ptes_shared_show(struct kobject *kobj,
30871a2c112SKirill A. Shutemov 				    struct kobj_attribute *attr,
30971a2c112SKirill A. Shutemov 				    char *buf)
31071a2c112SKirill A. Shutemov {
311ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
31271a2c112SKirill A. Shutemov }
31371a2c112SKirill A. Shutemov 
3146dcdc94dSMiaohe Lin static ssize_t max_ptes_shared_store(struct kobject *kobj,
31571a2c112SKirill A. Shutemov 				     struct kobj_attribute *attr,
31671a2c112SKirill A. Shutemov 				     const char *buf, size_t count)
31771a2c112SKirill A. Shutemov {
31871a2c112SKirill A. Shutemov 	int err;
31971a2c112SKirill A. Shutemov 	unsigned long max_ptes_shared;
32071a2c112SKirill A. Shutemov 
32171a2c112SKirill A. Shutemov 	err = kstrtoul(buf, 10, &max_ptes_shared);
32271a2c112SKirill A. Shutemov 	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
32371a2c112SKirill A. Shutemov 		return -EINVAL;
32471a2c112SKirill A. Shutemov 
32571a2c112SKirill A. Shutemov 	khugepaged_max_ptes_shared = max_ptes_shared;
32671a2c112SKirill A. Shutemov 
32771a2c112SKirill A. Shutemov 	return count;
32871a2c112SKirill A. Shutemov }
32971a2c112SKirill A. Shutemov 
33071a2c112SKirill A. Shutemov static struct kobj_attribute khugepaged_max_ptes_shared_attr =
3316dcdc94dSMiaohe Lin 	__ATTR_RW(max_ptes_shared);
33271a2c112SKirill A. Shutemov 
333b46e756fSKirill A. Shutemov static struct attribute *khugepaged_attr[] = {
334b46e756fSKirill A. Shutemov 	&khugepaged_defrag_attr.attr,
335b46e756fSKirill A. Shutemov 	&khugepaged_max_ptes_none_attr.attr,
33671a2c112SKirill A. Shutemov 	&khugepaged_max_ptes_swap_attr.attr,
33771a2c112SKirill A. Shutemov 	&khugepaged_max_ptes_shared_attr.attr,
338b46e756fSKirill A. Shutemov 	&pages_to_scan_attr.attr,
339b46e756fSKirill A. Shutemov 	&pages_collapsed_attr.attr,
340b46e756fSKirill A. Shutemov 	&full_scans_attr.attr,
341b46e756fSKirill A. Shutemov 	&scan_sleep_millisecs_attr.attr,
342b46e756fSKirill A. Shutemov 	&alloc_sleep_millisecs_attr.attr,
343b46e756fSKirill A. Shutemov 	NULL,
344b46e756fSKirill A. Shutemov };
345b46e756fSKirill A. Shutemov 
346b46e756fSKirill A. Shutemov struct attribute_group khugepaged_attr_group = {
347b46e756fSKirill A. Shutemov 	.attrs = khugepaged_attr,
348b46e756fSKirill A. Shutemov 	.name = "khugepaged",
349b46e756fSKirill A. Shutemov };
350e1465d12SJérémy Lefaure #endif /* CONFIG_SYSFS */
351b46e756fSKirill A. Shutemov 
352b46e756fSKirill A. Shutemov int hugepage_madvise(struct vm_area_struct *vma,
353b46e756fSKirill A. Shutemov 		     unsigned long *vm_flags, int advice)
354b46e756fSKirill A. Shutemov {
355b46e756fSKirill A. Shutemov 	switch (advice) {
356b46e756fSKirill A. Shutemov 	case MADV_HUGEPAGE:
357b46e756fSKirill A. Shutemov #ifdef CONFIG_S390
358b46e756fSKirill A. Shutemov 		/*
359b46e756fSKirill A. Shutemov 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
360b46e756fSKirill A. Shutemov 		 * can't handle this properly after s390_enable_sie, so we simply
361b46e756fSKirill A. Shutemov 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
362b46e756fSKirill A. Shutemov 		 */
363b46e756fSKirill A. Shutemov 		if (mm_has_pgste(vma->vm_mm))
364b46e756fSKirill A. Shutemov 			return 0;
365b46e756fSKirill A. Shutemov #endif
366b46e756fSKirill A. Shutemov 		*vm_flags &= ~VM_NOHUGEPAGE;
367b46e756fSKirill A. Shutemov 		*vm_flags |= VM_HUGEPAGE;
368b46e756fSKirill A. Shutemov 		/*
369b46e756fSKirill A. Shutemov 		 * If the vma becomes eligible for khugepaged to scan,
370b46e756fSKirill A. Shutemov 		 * register it here without waiting for a page fault that
371b46e756fSKirill A. Shutemov 		 * may not happen any time soon.
372b46e756fSKirill A. Shutemov 		 */
373c791576cSYang Shi 		khugepaged_enter_vma(vma, *vm_flags);
374b46e756fSKirill A. Shutemov 		break;
375b46e756fSKirill A. Shutemov 	case MADV_NOHUGEPAGE:
376b46e756fSKirill A. Shutemov 		*vm_flags &= ~VM_HUGEPAGE;
377b46e756fSKirill A. Shutemov 		*vm_flags |= VM_NOHUGEPAGE;
378b46e756fSKirill A. Shutemov 		/*
379b46e756fSKirill A. Shutemov 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
380b46e756fSKirill A. Shutemov 		 * this vma even if we leave the mm registered in khugepaged if
381b46e756fSKirill A. Shutemov 		 * it got registered before VM_NOHUGEPAGE was set.
382b46e756fSKirill A. Shutemov 		 */
383b46e756fSKirill A. Shutemov 		break;
384b46e756fSKirill A. Shutemov 	}
385b46e756fSKirill A. Shutemov 
386b46e756fSKirill A. Shutemov 	return 0;
387b46e756fSKirill A. Shutemov }
388b46e756fSKirill A. Shutemov 
389b46e756fSKirill A. Shutemov int __init khugepaged_init(void)
390b46e756fSKirill A. Shutemov {
391b46e756fSKirill A. Shutemov 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
392b26e2701SQi Zheng 					  sizeof(struct khugepaged_mm_slot),
393b26e2701SQi Zheng 					  __alignof__(struct khugepaged_mm_slot),
394b26e2701SQi Zheng 					  0, NULL);
395b46e756fSKirill A. Shutemov 	if (!mm_slot_cache)
396b46e756fSKirill A. Shutemov 		return -ENOMEM;
397b46e756fSKirill A. Shutemov 
398b46e756fSKirill A. Shutemov 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
399b46e756fSKirill A. Shutemov 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
400b46e756fSKirill A. Shutemov 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
40171a2c112SKirill A. Shutemov 	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
402b46e756fSKirill A. Shutemov 
403b46e756fSKirill A. Shutemov 	return 0;
404b46e756fSKirill A. Shutemov }
405b46e756fSKirill A. Shutemov 
406b46e756fSKirill A. Shutemov void __init khugepaged_destroy(void)
407b46e756fSKirill A. Shutemov {
408b46e756fSKirill A. Shutemov 	kmem_cache_destroy(mm_slot_cache);
409b46e756fSKirill A. Shutemov }
410b46e756fSKirill A. Shutemov 
4117d2c4385SZach O'Keefe static inline int hpage_collapse_test_exit(struct mm_struct *mm)
412b46e756fSKirill A. Shutemov {
4134d45e75aSJann Horn 	return atomic_read(&mm->mm_users) == 0;
414b46e756fSKirill A. Shutemov }
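
/*
 * Note on the check above: mm_users == 0 means the owning process has
 * exited (or is exiting). khugepaged itself only pins the mm_struct via
 * mmgrab() in __khugepaged_enter(), so the structure remains valid here
 * even while the address space is being torn down.
 */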
415b46e756fSKirill A. Shutemov 
416d2081b2bSYang Shi void __khugepaged_enter(struct mm_struct *mm)
417b46e756fSKirill A. Shutemov {
418b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
419b26e2701SQi Zheng 	struct mm_slot *slot;
420b46e756fSKirill A. Shutemov 	int wakeup;
421b46e756fSKirill A. Shutemov 
422b26e2701SQi Zheng 	mm_slot = mm_slot_alloc(mm_slot_cache);
423b46e756fSKirill A. Shutemov 	if (!mm_slot)
424d2081b2bSYang Shi 		return;
425b46e756fSKirill A. Shutemov 
426b26e2701SQi Zheng 	slot = &mm_slot->slot;
427b26e2701SQi Zheng 
428b46e756fSKirill A. Shutemov 	/* __khugepaged_exit() must not run from under us */
4297d2c4385SZach O'Keefe 	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
430b46e756fSKirill A. Shutemov 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
431b26e2701SQi Zheng 		mm_slot_free(mm_slot_cache, mm_slot);
432d2081b2bSYang Shi 		return;
433b46e756fSKirill A. Shutemov 	}
434b46e756fSKirill A. Shutemov 
435b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
436b26e2701SQi Zheng 	mm_slot_insert(mm_slots_hash, mm, slot);
437b46e756fSKirill A. Shutemov 	/*
438b46e756fSKirill A. Shutemov 	 * Insert just behind the scanning cursor, to let the area settle
439b46e756fSKirill A. Shutemov 	 * down a little.
440b46e756fSKirill A. Shutemov 	 */
441b46e756fSKirill A. Shutemov 	wakeup = list_empty(&khugepaged_scan.mm_head);
442b26e2701SQi Zheng 	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
443b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
444b46e756fSKirill A. Shutemov 
445f1f10076SVegard Nossum 	mmgrab(mm);
446b46e756fSKirill A. Shutemov 	if (wakeup)
447b46e756fSKirill A. Shutemov 		wake_up_interruptible(&khugepaged_wait);
448b46e756fSKirill A. Shutemov }
449b46e756fSKirill A. Shutemov 
450c791576cSYang Shi void khugepaged_enter_vma(struct vm_area_struct *vma,
451b46e756fSKirill A. Shutemov 			  unsigned long vm_flags)
452b46e756fSKirill A. Shutemov {
4532647d11bSYang Shi 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
4541064026bSYang Shi 	    hugepage_flags_enabled()) {
455a7f4e6e4SZach O'Keefe 		if (hugepage_vma_check(vma, vm_flags, false, false, true))
4562647d11bSYang Shi 			__khugepaged_enter(vma->vm_mm);
4572647d11bSYang Shi 	}
458b46e756fSKirill A. Shutemov }
459b46e756fSKirill A. Shutemov 
460b46e756fSKirill A. Shutemov void __khugepaged_exit(struct mm_struct *mm)
461b46e756fSKirill A. Shutemov {
462b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
463b26e2701SQi Zheng 	struct mm_slot *slot;
464b46e756fSKirill A. Shutemov 	int free = 0;
465b46e756fSKirill A. Shutemov 
466b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
467b26e2701SQi Zheng 	slot = mm_slot_lookup(mm_slots_hash, mm);
468b26e2701SQi Zheng 	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
469b46e756fSKirill A. Shutemov 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
470b26e2701SQi Zheng 		hash_del(&slot->hash);
471b26e2701SQi Zheng 		list_del(&slot->mm_node);
472b46e756fSKirill A. Shutemov 		free = 1;
473b46e756fSKirill A. Shutemov 	}
474b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
475b46e756fSKirill A. Shutemov 
476b46e756fSKirill A. Shutemov 	if (free) {
477b46e756fSKirill A. Shutemov 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
478b26e2701SQi Zheng 		mm_slot_free(mm_slot_cache, mm_slot);
479b46e756fSKirill A. Shutemov 		mmdrop(mm);
480b46e756fSKirill A. Shutemov 	} else if (mm_slot) {
481b46e756fSKirill A. Shutemov 		/*
482b46e756fSKirill A. Shutemov 		 * This is required to serialize against
4837d2c4385SZach O'Keefe 		 * hpage_collapse_test_exit() (which is guaranteed to run
4847d2c4385SZach O'Keefe 		 * under mmap_lock read mode). Stop here (after we return all
4857d2c4385SZach O'Keefe 		 * pagetables will be destroyed) until khugepaged has finished
4867d2c4385SZach O'Keefe 		 * working on the pagetables under the mmap_lock.
487b46e756fSKirill A. Shutemov 		 */
488d8ed45c5SMichel Lespinasse 		mmap_write_lock(mm);
489d8ed45c5SMichel Lespinasse 		mmap_write_unlock(mm);
490b46e756fSKirill A. Shutemov 	}
491b46e756fSKirill A. Shutemov }
492b46e756fSKirill A. Shutemov 
49392644f58SVishal Moola (Oracle) static void release_pte_folio(struct folio *folio)
49492644f58SVishal Moola (Oracle) {
49592644f58SVishal Moola (Oracle) 	node_stat_mod_folio(folio,
49692644f58SVishal Moola (Oracle) 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
49792644f58SVishal Moola (Oracle) 			-folio_nr_pages(folio));
49892644f58SVishal Moola (Oracle) 	folio_unlock(folio);
49992644f58SVishal Moola (Oracle) 	folio_putback_lru(folio);
50092644f58SVishal Moola (Oracle) }
50192644f58SVishal Moola (Oracle) 
502b46e756fSKirill A. Shutemov static void release_pte_page(struct page *page)
503b46e756fSKirill A. Shutemov {
50492644f58SVishal Moola (Oracle) 	release_pte_folio(page_folio(page));
505b46e756fSKirill A. Shutemov }
506b46e756fSKirill A. Shutemov 
5075503fbf2SKirill A. Shutemov static void release_pte_pages(pte_t *pte, pte_t *_pte,
5085503fbf2SKirill A. Shutemov 		struct list_head *compound_pagelist)
509b46e756fSKirill A. Shutemov {
5109bdfeea4SVishal Moola (Oracle) 	struct folio *folio, *tmp;
5115503fbf2SKirill A. Shutemov 
512b46e756fSKirill A. Shutemov 	while (--_pte >= pte) {
513b46e756fSKirill A. Shutemov 		pte_t pteval = *_pte;
514*f528260bSVishal Moola (Oracle) 		unsigned long pfn;
5155503fbf2SKirill A. Shutemov 
516*f528260bSVishal Moola (Oracle) 		if (pte_none(pteval))
517*f528260bSVishal Moola (Oracle) 			continue;
518*f528260bSVishal Moola (Oracle) 		pfn = pte_pfn(pteval);
519*f528260bSVishal Moola (Oracle) 		if (is_zero_pfn(pfn))
520*f528260bSVishal Moola (Oracle) 			continue;
521*f528260bSVishal Moola (Oracle) 		folio = pfn_folio(pfn);
522*f528260bSVishal Moola (Oracle) 		if (folio_test_large(folio))
523*f528260bSVishal Moola (Oracle) 			continue;
5249bdfeea4SVishal Moola (Oracle) 		release_pte_folio(folio);
5255503fbf2SKirill A. Shutemov 	}
5265503fbf2SKirill A. Shutemov 
5279bdfeea4SVishal Moola (Oracle) 	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
5289bdfeea4SVishal Moola (Oracle) 		list_del(&folio->lru);
5299bdfeea4SVishal Moola (Oracle) 		release_pte_folio(folio);
530b46e756fSKirill A. Shutemov 	}
531b46e756fSKirill A. Shutemov }
532b46e756fSKirill A. Shutemov 
5339445689fSKirill A. Shutemov static bool is_refcount_suitable(struct page *page)
5349445689fSKirill A. Shutemov {
5359445689fSKirill A. Shutemov 	int expected_refcount;
5369445689fSKirill A. Shutemov 
5379445689fSKirill A. Shutemov 	expected_refcount = total_mapcount(page);
5389445689fSKirill A. Shutemov 	if (PageSwapCache(page))
5399445689fSKirill A. Shutemov 		expected_refcount += compound_nr(page);
5409445689fSKirill A. Shutemov 
5419445689fSKirill A. Shutemov 	return page_count(page) == expected_refcount;
5429445689fSKirill A. Shutemov }
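
/*
 * Worked example for the accounting above (illustrative numbers): a base
 * page mapped into two processes has total_mapcount == 2; if it also sits
 * in the swap cache, the cache holds one extra reference per subpage
 * (compound_nr). Any refcount beyond that sum indicates an external pin
 * such as GUP, so the page is not suitable for collapse.
 */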
5439445689fSKirill A. Shutemov 
544b46e756fSKirill A. Shutemov static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
545b46e756fSKirill A. Shutemov 					unsigned long address,
5465503fbf2SKirill A. Shutemov 					pte_t *pte,
547d8ea7cc8SZach O'Keefe 					struct collapse_control *cc,
5485503fbf2SKirill A. Shutemov 					struct list_head *compound_pagelist)
549b46e756fSKirill A. Shutemov {
550b46e756fSKirill A. Shutemov 	struct page *page = NULL;
551b46e756fSKirill A. Shutemov 	pte_t *_pte;
55250ad2f24SZach O'Keefe 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
5530db501f7SEbru Akagunduz 	bool writable = false;
554b46e756fSKirill A. Shutemov 
555b46e756fSKirill A. Shutemov 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
556b46e756fSKirill A. Shutemov 	     _pte++, address += PAGE_SIZE) {
557b46e756fSKirill A. Shutemov 		pte_t pteval = *_pte;
558b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || (pte_present(pteval) &&
559b46e756fSKirill A. Shutemov 				is_zero_pfn(pte_pfn(pteval)))) {
560d8ea7cc8SZach O'Keefe 			++none_or_zero;
561b46e756fSKirill A. Shutemov 			if (!userfaultfd_armed(vma) &&
562d8ea7cc8SZach O'Keefe 			    (!cc->is_khugepaged ||
563d8ea7cc8SZach O'Keefe 			     none_or_zero <= khugepaged_max_ptes_none)) {
564b46e756fSKirill A. Shutemov 				continue;
565b46e756fSKirill A. Shutemov 			} else {
566b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_NONE_PTE;
567e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
568b46e756fSKirill A. Shutemov 				goto out;
569b46e756fSKirill A. Shutemov 			}
570b46e756fSKirill A. Shutemov 		}
571b46e756fSKirill A. Shutemov 		if (!pte_present(pteval)) {
572b46e756fSKirill A. Shutemov 			result = SCAN_PTE_NON_PRESENT;
573b46e756fSKirill A. Shutemov 			goto out;
574b46e756fSKirill A. Shutemov 		}
575b46e756fSKirill A. Shutemov 		page = vm_normal_page(vma, address, pteval);
5763218f871SAlex Sierra 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
577b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_NULL;
578b46e756fSKirill A. Shutemov 			goto out;
579b46e756fSKirill A. Shutemov 		}
580b46e756fSKirill A. Shutemov 
581b46e756fSKirill A. Shutemov 		VM_BUG_ON_PAGE(!PageAnon(page), page);
582b46e756fSKirill A. Shutemov 
583d8ea7cc8SZach O'Keefe 		if (page_mapcount(page) > 1) {
584d8ea7cc8SZach O'Keefe 			++shared;
585d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
586d8ea7cc8SZach O'Keefe 			    shared > khugepaged_max_ptes_shared) {
58771a2c112SKirill A. Shutemov 				result = SCAN_EXCEED_SHARED_PTE;
588e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
58971a2c112SKirill A. Shutemov 				goto out;
59071a2c112SKirill A. Shutemov 			}
591d8ea7cc8SZach O'Keefe 		}
59271a2c112SKirill A. Shutemov 
5935503fbf2SKirill A. Shutemov 		if (PageCompound(page)) {
5945503fbf2SKirill A. Shutemov 			struct page *p;
5955503fbf2SKirill A. Shutemov 			page = compound_head(page);
5965503fbf2SKirill A. Shutemov 
5975503fbf2SKirill A. Shutemov 			/*
5985503fbf2SKirill A. Shutemov 			 * Check if we have dealt with the compound page
5995503fbf2SKirill A. Shutemov 			 * already
6005503fbf2SKirill A. Shutemov 			 */
6015503fbf2SKirill A. Shutemov 			list_for_each_entry(p, compound_pagelist, lru) {
6025503fbf2SKirill A. Shutemov 				if (page == p)
6035503fbf2SKirill A. Shutemov 					goto next;
6045503fbf2SKirill A. Shutemov 			}
6055503fbf2SKirill A. Shutemov 		}
6065503fbf2SKirill A. Shutemov 
607b46e756fSKirill A. Shutemov 		/*
608b46e756fSKirill A. Shutemov 		 * We can do it before isolate_lru_page because the
609b46e756fSKirill A. Shutemov 		 * page can't be freed from under us. NOTE: PG_lock
610b46e756fSKirill A. Shutemov 		 * is needed to serialize against split_huge_page
611b46e756fSKirill A. Shutemov 		 * when invoked from the VM.
612b46e756fSKirill A. Shutemov 		 */
613b46e756fSKirill A. Shutemov 		if (!trylock_page(page)) {
614b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LOCK;
615b46e756fSKirill A. Shutemov 			goto out;
616b46e756fSKirill A. Shutemov 		}
617b46e756fSKirill A. Shutemov 
618b46e756fSKirill A. Shutemov 		/*
6199445689fSKirill A. Shutemov 		 * Check if the page has any GUP (or other external) pins.
6209445689fSKirill A. Shutemov 		 *
6219445689fSKirill A. Shutemov 		 * The page table that maps the page has already been unlinked
6229445689fSKirill A. Shutemov 		 * from the page table tree and this process cannot get
623f0953a1bSIngo Molnar 		 * an additional pin on the page.
6249445689fSKirill A. Shutemov 		 *
6259445689fSKirill A. Shutemov 		 * New pins can come later if the page is shared across fork,
6269445689fSKirill A. Shutemov 		 * but not from this process. The other process cannot write to
6279445689fSKirill A. Shutemov 		 * the page, only trigger CoW.
628b46e756fSKirill A. Shutemov 		 */
6299445689fSKirill A. Shutemov 		if (!is_refcount_suitable(page)) {
630b46e756fSKirill A. Shutemov 			unlock_page(page);
631b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
632b46e756fSKirill A. Shutemov 			goto out;
633b46e756fSKirill A. Shutemov 		}
634b46e756fSKirill A. Shutemov 
635b46e756fSKirill A. Shutemov 		/*
636b46e756fSKirill A. Shutemov 		 * Isolate the page to avoid collapsing a hugepage
637b46e756fSKirill A. Shutemov 		 * currently in use by the VM.
638b46e756fSKirill A. Shutemov 		 */
639b46e756fSKirill A. Shutemov 		if (isolate_lru_page(page)) {
640b46e756fSKirill A. Shutemov 			unlock_page(page);
641b46e756fSKirill A. Shutemov 			result = SCAN_DEL_PAGE_LRU;
642b46e756fSKirill A. Shutemov 			goto out;
643b46e756fSKirill A. Shutemov 		}
6445503fbf2SKirill A. Shutemov 		mod_node_page_state(page_pgdat(page),
6455503fbf2SKirill A. Shutemov 				NR_ISOLATED_ANON + page_is_file_lru(page),
6465503fbf2SKirill A. Shutemov 				compound_nr(page));
647b46e756fSKirill A. Shutemov 		VM_BUG_ON_PAGE(!PageLocked(page), page);
648b46e756fSKirill A. Shutemov 		VM_BUG_ON_PAGE(PageLRU(page), page);
649b46e756fSKirill A. Shutemov 
6505503fbf2SKirill A. Shutemov 		if (PageCompound(page))
6515503fbf2SKirill A. Shutemov 			list_add_tail(&page->lru, compound_pagelist);
6525503fbf2SKirill A. Shutemov next:
653d8ea7cc8SZach O'Keefe 		/*
654d8ea7cc8SZach O'Keefe 		 * If collapse was initiated by khugepaged, check that there is
655d8ea7cc8SZach O'Keefe 		 * enough young ptes to justify collapsing the page
656d8ea7cc8SZach O'Keefe 		 */
657d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
658d8ea7cc8SZach O'Keefe 		    (pte_young(pteval) || page_is_young(page) ||
659d8ea7cc8SZach O'Keefe 		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
660d8ea7cc8SZach O'Keefe 								     address)))
6610db501f7SEbru Akagunduz 			referenced++;
6625503fbf2SKirill A. Shutemov 
6635503fbf2SKirill A. Shutemov 		if (pte_write(pteval))
6645503fbf2SKirill A. Shutemov 			writable = true;
665b46e756fSKirill A. Shutemov 	}
66674e579bfSMiaohe Lin 
66774e579bfSMiaohe Lin 	if (unlikely(!writable)) {
66874e579bfSMiaohe Lin 		result = SCAN_PAGE_RO;
669d8ea7cc8SZach O'Keefe 	} else if (unlikely(cc->is_khugepaged && !referenced)) {
67074e579bfSMiaohe Lin 		result = SCAN_LACK_REFERENCED_PAGE;
67174e579bfSMiaohe Lin 	} else {
672b46e756fSKirill A. Shutemov 		result = SCAN_SUCCEED;
673b46e756fSKirill A. Shutemov 		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
674b46e756fSKirill A. Shutemov 						    referenced, writable, result);
67550ad2f24SZach O'Keefe 		return result;
676b46e756fSKirill A. Shutemov 	}
677b46e756fSKirill A. Shutemov out:
6785503fbf2SKirill A. Shutemov 	release_pte_pages(pte, _pte, compound_pagelist);
679b46e756fSKirill A. Shutemov 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
680b46e756fSKirill A. Shutemov 					    referenced, writable, result);
68150ad2f24SZach O'Keefe 	return result;
682b46e756fSKirill A. Shutemov }
683b46e756fSKirill A. Shutemov 
684b46e756fSKirill A. Shutemov static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
685b46e756fSKirill A. Shutemov 				      struct vm_area_struct *vma,
686b46e756fSKirill A. Shutemov 				      unsigned long address,
6875503fbf2SKirill A. Shutemov 				      spinlock_t *ptl,
6885503fbf2SKirill A. Shutemov 				      struct list_head *compound_pagelist)
689b46e756fSKirill A. Shutemov {
6905503fbf2SKirill A. Shutemov 	struct page *src_page, *tmp;
691b46e756fSKirill A. Shutemov 	pte_t *_pte;
692338a16baSDavid Rientjes 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
693338a16baSDavid Rientjes 				_pte++, page++, address += PAGE_SIZE) {
694b46e756fSKirill A. Shutemov 		pte_t pteval = *_pte;
695b46e756fSKirill A. Shutemov 
696b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
697b46e756fSKirill A. Shutemov 			clear_user_highpage(page, address);
698b46e756fSKirill A. Shutemov 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
699b46e756fSKirill A. Shutemov 			if (is_zero_pfn(pte_pfn(pteval))) {
700b46e756fSKirill A. Shutemov 				/*
701b46e756fSKirill A. Shutemov 				 * ptl mostly unnecessary.
702b46e756fSKirill A. Shutemov 				 */
703b46e756fSKirill A. Shutemov 				spin_lock(ptl);
70408d5b29eSPasha Tatashin 				ptep_clear(vma->vm_mm, address, _pte);
705b46e756fSKirill A. Shutemov 				spin_unlock(ptl);
706b46e756fSKirill A. Shutemov 			}
707b46e756fSKirill A. Shutemov 		} else {
708b46e756fSKirill A. Shutemov 			src_page = pte_page(pteval);
709b46e756fSKirill A. Shutemov 			copy_user_highpage(page, src_page, address, vma);
7105503fbf2SKirill A. Shutemov 			if (!PageCompound(src_page))
711b46e756fSKirill A. Shutemov 				release_pte_page(src_page);
712b46e756fSKirill A. Shutemov 			/*
713b46e756fSKirill A. Shutemov 			 * ptl mostly unnecessary, but preempt has to
714b46e756fSKirill A. Shutemov 			 * be disabled to update the per-cpu stats
715b46e756fSKirill A. Shutemov 			 * inside page_remove_rmap().
716b46e756fSKirill A. Shutemov 			 */
717b46e756fSKirill A. Shutemov 			spin_lock(ptl);
71808d5b29eSPasha Tatashin 			ptep_clear(vma->vm_mm, address, _pte);
719cea86fe2SHugh Dickins 			page_remove_rmap(src_page, vma, false);
720b46e756fSKirill A. Shutemov 			spin_unlock(ptl);
721b46e756fSKirill A. Shutemov 			free_page_and_swap_cache(src_page);
722b46e756fSKirill A. Shutemov 		}
723b46e756fSKirill A. Shutemov 	}
7245503fbf2SKirill A. Shutemov 
7255503fbf2SKirill A. Shutemov 	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
7265503fbf2SKirill A. Shutemov 		list_del(&src_page->lru);
7271baec203SMiaohe Lin 		mod_node_page_state(page_pgdat(src_page),
7281baec203SMiaohe Lin 				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
7291baec203SMiaohe Lin 				    -compound_nr(src_page));
7301baec203SMiaohe Lin 		unlock_page(src_page);
7311baec203SMiaohe Lin 		free_swap_cache(src_page);
7321baec203SMiaohe Lin 		putback_lru_page(src_page);
7335503fbf2SKirill A. Shutemov 	}
734b46e756fSKirill A. Shutemov }
735b46e756fSKirill A. Shutemov 
736b46e756fSKirill A. Shutemov static void khugepaged_alloc_sleep(void)
737b46e756fSKirill A. Shutemov {
738b46e756fSKirill A. Shutemov 	DEFINE_WAIT(wait);
739b46e756fSKirill A. Shutemov 
740b46e756fSKirill A. Shutemov 	add_wait_queue(&khugepaged_wait, &wait);
741f5d39b02SPeter Zijlstra 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
742f5d39b02SPeter Zijlstra 	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
743b46e756fSKirill A. Shutemov 	remove_wait_queue(&khugepaged_wait, &wait);
744b46e756fSKirill A. Shutemov }
745b46e756fSKirill A. Shutemov 
74634d6b470SZach O'Keefe struct collapse_control khugepaged_collapse_control = {
747d8ea7cc8SZach O'Keefe 	.is_khugepaged = true,
74834d6b470SZach O'Keefe };
749b46e756fSKirill A. Shutemov 
7507d2c4385SZach O'Keefe static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
751b46e756fSKirill A. Shutemov {
752b46e756fSKirill A. Shutemov 	int i;
753b46e756fSKirill A. Shutemov 
754b46e756fSKirill A. Shutemov 	/*
755a5f5f91dSMel Gorman 	 * If node_reclaim_mode is disabled, then no extra effort is made to
756b46e756fSKirill A. Shutemov 	 * allocate memory locally.
757b46e756fSKirill A. Shutemov 	 */
758202e35dbSDave Hansen 	if (!node_reclaim_enabled())
759b46e756fSKirill A. Shutemov 		return false;
760b46e756fSKirill A. Shutemov 
761b46e756fSKirill A. Shutemov 	/* If there is a count for this node already, it must be acceptable */
76234d6b470SZach O'Keefe 	if (cc->node_load[nid])
763b46e756fSKirill A. Shutemov 		return false;
764b46e756fSKirill A. Shutemov 
765b46e756fSKirill A. Shutemov 	for (i = 0; i < MAX_NUMNODES; i++) {
76634d6b470SZach O'Keefe 		if (!cc->node_load[i])
767b46e756fSKirill A. Shutemov 			continue;
768a55c7454SMatt Fleming 		if (node_distance(nid, i) > node_reclaim_distance)
769b46e756fSKirill A. Shutemov 			return true;
770b46e756fSKirill A. Shutemov 	}
771b46e756fSKirill A. Shutemov 	return false;
772b46e756fSKirill A. Shutemov }
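
/*
 * Example of the heuristic above (illustrative): with node reclaim enabled,
 * if pages have already been counted on node 0 and a page is then found on
 * node 1 with node_distance(1, 0) > node_reclaim_distance, the scan aborts;
 * collapsing would force either a remote allocation or remote accesses,
 * which node reclaim is configured to avoid.
 */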
773b46e756fSKirill A. Shutemov 
7741064026bSYang Shi #define khugepaged_defrag()					\
7751064026bSYang Shi 	(transparent_hugepage_flags &				\
7761064026bSYang Shi 	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
7771064026bSYang Shi 
778b46e756fSKirill A. Shutemov /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
779b46e756fSKirill A. Shutemov static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
780b46e756fSKirill A. Shutemov {
78125160354SVlastimil Babka 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
782b46e756fSKirill A. Shutemov }
783b46e756fSKirill A. Shutemov 
784b46e756fSKirill A. Shutemov #ifdef CONFIG_NUMA
7857d2c4385SZach O'Keefe static int hpage_collapse_find_target_node(struct collapse_control *cc)
786b46e756fSKirill A. Shutemov {
787b46e756fSKirill A. Shutemov 	int nid, target_node = 0, max_value = 0;
788b46e756fSKirill A. Shutemov 
789b46e756fSKirill A. Shutemov 	/* find first node with max normal pages hit */
790b46e756fSKirill A. Shutemov 	for (nid = 0; nid < MAX_NUMNODES; nid++)
79134d6b470SZach O'Keefe 		if (cc->node_load[nid] > max_value) {
79234d6b470SZach O'Keefe 			max_value = cc->node_load[nid];
793b46e756fSKirill A. Shutemov 			target_node = nid;
794b46e756fSKirill A. Shutemov 		}
795b46e756fSKirill A. Shutemov 
796e031ff96SYang Shi 	for_each_online_node(nid) {
797e031ff96SYang Shi 		if (max_value == cc->node_load[nid])
798e031ff96SYang Shi 			node_set(nid, cc->alloc_nmask);
799b46e756fSKirill A. Shutemov 	}
800b46e756fSKirill A. Shutemov 
801b46e756fSKirill A. Shutemov 	return target_node;
802b46e756fSKirill A. Shutemov }
803c6a7f445SYang Shi #else
8047d2c4385SZach O'Keefe static int hpage_collapse_find_target_node(struct collapse_control *cc)
805b46e756fSKirill A. Shutemov {
806c6a7f445SYang Shi 	return 0;
807b46e756fSKirill A. Shutemov }
808c6a7f445SYang Shi #endif
809b46e756fSKirill A. Shutemov 
810e031ff96SYang Shi static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
811e031ff96SYang Shi 				      nodemask_t *nmask)
812b46e756fSKirill A. Shutemov {
813e031ff96SYang Shi 	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
814b46e756fSKirill A. Shutemov 	if (unlikely(!*hpage)) {
815b46e756fSKirill A. Shutemov 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
8169710a78aSZach O'Keefe 		return false;
817b46e756fSKirill A. Shutemov 	}
818b46e756fSKirill A. Shutemov 
819b46e756fSKirill A. Shutemov 	prep_transhuge_page(*hpage);
820b46e756fSKirill A. Shutemov 	count_vm_event(THP_COLLAPSE_ALLOC);
821b46e756fSKirill A. Shutemov 	return true;
822b46e756fSKirill A. Shutemov }
823b46e756fSKirill A. Shutemov 
824b46e756fSKirill A. Shutemov /*
825c1e8d7c6SMichel Lespinasse  * If the mmap_lock was temporarily dropped, revalidate the vma
826c1e8d7c6SMichel Lespinasse  * after re-acquiring the mmap_lock.
82750ad2f24SZach O'Keefe  * Returns enum scan_result value.
828b46e756fSKirill A. Shutemov  */
829b46e756fSKirill A. Shutemov 
830c131f751SKirill A. Shutemov static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
83134488399SZach O'Keefe 				   bool expect_anon,
832a7f4e6e4SZach O'Keefe 				   struct vm_area_struct **vmap,
833a7f4e6e4SZach O'Keefe 				   struct collapse_control *cc)
834b46e756fSKirill A. Shutemov {
835b46e756fSKirill A. Shutemov 	struct vm_area_struct *vma;
836b46e756fSKirill A. Shutemov 
8377d2c4385SZach O'Keefe 	if (unlikely(hpage_collapse_test_exit(mm)))
838b46e756fSKirill A. Shutemov 		return SCAN_ANY_PROCESS;
839b46e756fSKirill A. Shutemov 
840c131f751SKirill A. Shutemov 	*vmap = vma = find_vma(mm, address);
841b46e756fSKirill A. Shutemov 	if (!vma)
842b46e756fSKirill A. Shutemov 		return SCAN_VMA_NULL;
843b46e756fSKirill A. Shutemov 
8444fa6893fSYang Shi 	if (!transhuge_vma_suitable(vma, address))
845b46e756fSKirill A. Shutemov 		return SCAN_ADDRESS_RANGE;
846a7f4e6e4SZach O'Keefe 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
847a7f4e6e4SZach O'Keefe 				cc->is_khugepaged))
848b46e756fSKirill A. Shutemov 		return SCAN_VMA_CHECK;
849f707fa49SYang Shi 	/*
850f707fa49SYang Shi 	 * Anon VMA expected, the address may be unmapped then
851f707fa49SYang Shi 	 * remapped to a file after khugepaged reacquired the mmap_lock.
852f707fa49SYang Shi 	 *
853f707fa49SYang Shi 	 * hugepage_vma_check may return true for qualified file
854f707fa49SYang Shi 	 * vmas.
855f707fa49SYang Shi 	 */
85634488399SZach O'Keefe 	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
85734488399SZach O'Keefe 		return SCAN_PAGE_ANON;
85850ad2f24SZach O'Keefe 	return SCAN_SUCCEED;
859b46e756fSKirill A. Shutemov }
860b46e756fSKirill A. Shutemov 
861edb5d0cfSZach O'Keefe /*
862edb5d0cfSZach O'Keefe  * See pmd_trans_unstable() for how the result may change out from
863edb5d0cfSZach O'Keefe  * underneath us, even if we hold the mmap_lock in read mode.
864edb5d0cfSZach O'Keefe  */
86550722804SZach O'Keefe static int find_pmd_or_thp_or_none(struct mm_struct *mm,
86650722804SZach O'Keefe 				   unsigned long address,
86750722804SZach O'Keefe 				   pmd_t **pmd)
86850722804SZach O'Keefe {
86950722804SZach O'Keefe 	pmd_t pmde;
87050722804SZach O'Keefe 
87150722804SZach O'Keefe 	*pmd = mm_find_pmd(mm, address);
87250722804SZach O'Keefe 	if (!*pmd)
87350722804SZach O'Keefe 		return SCAN_PMD_NULL;
87450722804SZach O'Keefe 
875dab6e717SPeter Zijlstra 	pmde = pmdp_get_lockless(*pmd);
87650722804SZach O'Keefe 
87750722804SZach O'Keefe #ifdef CONFIG_TRANSPARENT_HUGEPAGE
87850722804SZach O'Keefe 	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
87950722804SZach O'Keefe 	barrier();
88050722804SZach O'Keefe #endif
88134488399SZach O'Keefe 	if (pmd_none(pmde))
88234488399SZach O'Keefe 		return SCAN_PMD_NONE;
883edb5d0cfSZach O'Keefe 	if (!pmd_present(pmde))
884edb5d0cfSZach O'Keefe 		return SCAN_PMD_NULL;
88550722804SZach O'Keefe 	if (pmd_trans_huge(pmde))
88650722804SZach O'Keefe 		return SCAN_PMD_MAPPED;
887edb5d0cfSZach O'Keefe 	if (pmd_devmap(pmde))
888edb5d0cfSZach O'Keefe 		return SCAN_PMD_NULL;
88950722804SZach O'Keefe 	if (pmd_bad(pmde))
89050722804SZach O'Keefe 		return SCAN_PMD_NULL;
89150722804SZach O'Keefe 	return SCAN_SUCCEED;
89250722804SZach O'Keefe }
89350722804SZach O'Keefe 
89450722804SZach O'Keefe static int check_pmd_still_valid(struct mm_struct *mm,
89550722804SZach O'Keefe 				 unsigned long address,
89650722804SZach O'Keefe 				 pmd_t *pmd)
89750722804SZach O'Keefe {
89850722804SZach O'Keefe 	pmd_t *new_pmd;
89950722804SZach O'Keefe 	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
90050722804SZach O'Keefe 
90150722804SZach O'Keefe 	if (result != SCAN_SUCCEED)
90250722804SZach O'Keefe 		return result;
90350722804SZach O'Keefe 	if (new_pmd != pmd)
90450722804SZach O'Keefe 		return SCAN_FAIL;
90550722804SZach O'Keefe 	return SCAN_SUCCEED;
906b46e756fSKirill A. Shutemov }
907b46e756fSKirill A. Shutemov 
908b46e756fSKirill A. Shutemov /*
909b46e756fSKirill A. Shutemov  * Bring missing pages in from swap, to complete THP collapse.
9107d2c4385SZach O'Keefe  * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
911b46e756fSKirill A. Shutemov  *
9124d928e20SMiaohe Lin  * Called and returns without pte mapped or spinlocks held.
9134d928e20SMiaohe Lin  * Note that on any result other than SCAN_SUCCEED, mmap_lock will have been released.
914b46e756fSKirill A. Shutemov  */
915b46e756fSKirill A. Shutemov 
91650ad2f24SZach O'Keefe static int __collapse_huge_page_swapin(struct mm_struct *mm,
917b46e756fSKirill A. Shutemov 				       struct vm_area_struct *vma,
9182b635dd3SWill Deacon 				       unsigned long haddr, pmd_t *pmd,
9190db501f7SEbru Akagunduz 				       int referenced)
920b46e756fSKirill A. Shutemov {
9212b740303SSouptick Joarder 	int swapped_in = 0;
9222b740303SSouptick Joarder 	vm_fault_t ret = 0;
9232b635dd3SWill Deacon 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
9242b635dd3SWill Deacon 
9252b635dd3SWill Deacon 	for (address = haddr; address < end; address += PAGE_SIZE) {
92682b0f8c3SJan Kara 		struct vm_fault vmf = {
927b46e756fSKirill A. Shutemov 			.vma = vma,
928b46e756fSKirill A. Shutemov 			.address = address,
9292b635dd3SWill Deacon 			.pgoff = linear_page_index(vma, haddr),
930b46e756fSKirill A. Shutemov 			.flags = FAULT_FLAG_ALLOW_RETRY,
931b46e756fSKirill A. Shutemov 			.pmd = pmd,
932b46e756fSKirill A. Shutemov 		};
933b46e756fSKirill A. Shutemov 
93482b0f8c3SJan Kara 		vmf.pte = pte_offset_map(pmd, address);
9352994302bSJan Kara 		vmf.orig_pte = *vmf.pte;
9362b635dd3SWill Deacon 		if (!is_swap_pte(vmf.orig_pte)) {
9372b635dd3SWill Deacon 			pte_unmap(vmf.pte);
938b46e756fSKirill A. Shutemov 			continue;
9392b635dd3SWill Deacon 		}
9402994302bSJan Kara 		ret = do_swap_page(&vmf);
9410db501f7SEbru Akagunduz 
9424d928e20SMiaohe Lin 		/*
9434d928e20SMiaohe Lin 		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
9444d928e20SMiaohe Lin 		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
9454d928e20SMiaohe Lin 		 * we do not retry here and the swap entry will remain in the
9464d928e20SMiaohe Lin 		 * pagetable, resulting in a later failure.
9474d928e20SMiaohe Lin 		 */
948b46e756fSKirill A. Shutemov 		if (ret & VM_FAULT_RETRY) {
9490db501f7SEbru Akagunduz 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
95050ad2f24SZach O'Keefe 			/* Likely, but not guaranteed, that page lock failed */
95150ad2f24SZach O'Keefe 			return SCAN_PAGE_LOCK;
95247f863eaSEbru Akagunduz 		}
953b46e756fSKirill A. Shutemov 		if (ret & VM_FAULT_ERROR) {
9544d928e20SMiaohe Lin 			mmap_read_unlock(mm);
9550db501f7SEbru Akagunduz 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
95650ad2f24SZach O'Keefe 			return SCAN_FAIL;
957b46e756fSKirill A. Shutemov 		}
9584d928e20SMiaohe Lin 		swapped_in++;
959b46e756fSKirill A. Shutemov 	}
960ae2c5d80SKirill A. Shutemov 
961ae2c5d80SKirill A. Shutemov 	/* Drain the LRU add pagevec to remove the extra pin on the swapped-in pages */
962ae2c5d80SKirill A. Shutemov 	if (swapped_in)
963ae2c5d80SKirill A. Shutemov 		lru_add_drain();
964ae2c5d80SKirill A. Shutemov 
9650db501f7SEbru Akagunduz 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
96650ad2f24SZach O'Keefe 	return SCAN_SUCCEED;
967b46e756fSKirill A. Shutemov }
968b46e756fSKirill A. Shutemov 
9699710a78aSZach O'Keefe static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
9709710a78aSZach O'Keefe 			      struct collapse_control *cc)
9719710a78aSZach O'Keefe {
9727d8faaf1SZach O'Keefe 	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
973e031ff96SYang Shi 		     GFP_TRANSHUGE);
9747d2c4385SZach O'Keefe 	int node = hpage_collapse_find_target_node(cc);
9759710a78aSZach O'Keefe 
976e031ff96SYang Shi 	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
9779710a78aSZach O'Keefe 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
9789710a78aSZach O'Keefe 	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
9799710a78aSZach O'Keefe 		return SCAN_CGROUP_CHARGE_FAIL;
9809710a78aSZach O'Keefe 	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
9819710a78aSZach O'Keefe 	return SCAN_SUCCEED;
9829710a78aSZach O'Keefe }
9839710a78aSZach O'Keefe 
98450ad2f24SZach O'Keefe static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
98550ad2f24SZach O'Keefe 			      int referenced, int unmapped,
98650ad2f24SZach O'Keefe 			      struct collapse_control *cc)
987b46e756fSKirill A. Shutemov {
9885503fbf2SKirill A. Shutemov 	LIST_HEAD(compound_pagelist);
989b46e756fSKirill A. Shutemov 	pmd_t *pmd, _pmd;
990b46e756fSKirill A. Shutemov 	pte_t *pte;
991b46e756fSKirill A. Shutemov 	pgtable_t pgtable;
99250ad2f24SZach O'Keefe 	struct page *hpage;
993b46e756fSKirill A. Shutemov 	spinlock_t *pmd_ptl, *pte_ptl;
99450ad2f24SZach O'Keefe 	int result = SCAN_FAIL;
995c131f751SKirill A. Shutemov 	struct vm_area_struct *vma;
996ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
997b46e756fSKirill A. Shutemov 
998b46e756fSKirill A. Shutemov 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
999b46e756fSKirill A. Shutemov 
1000988ddb71SKirill A. Shutemov 	/*
1001c1e8d7c6SMichel Lespinasse 	 * Before allocating the hugepage, release the mmap_lock read lock.
1002988ddb71SKirill A. Shutemov 	 * The allocation can take potentially a long time if it involves
1003c1e8d7c6SMichel Lespinasse 	 * sync compaction, and we do not need to hold the mmap_lock during
1004988ddb71SKirill A. Shutemov 	 * that. We will recheck the vma after taking it again in write mode.
1005988ddb71SKirill A. Shutemov 	 */
1006d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1007b46e756fSKirill A. Shutemov 
100850ad2f24SZach O'Keefe 	result = alloc_charge_hpage(&hpage, mm, cc);
10099710a78aSZach O'Keefe 	if (result != SCAN_SUCCEED)
1010b46e756fSKirill A. Shutemov 		goto out_nolock;
1011b46e756fSKirill A. Shutemov 
1012d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
101334488399SZach O'Keefe 	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
101450ad2f24SZach O'Keefe 	if (result != SCAN_SUCCEED) {
1015d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1016b46e756fSKirill A. Shutemov 		goto out_nolock;
1017b46e756fSKirill A. Shutemov 	}
1018b46e756fSKirill A. Shutemov 
101950722804SZach O'Keefe 	result = find_pmd_or_thp_or_none(mm, address, &pmd);
102050722804SZach O'Keefe 	if (result != SCAN_SUCCEED) {
1021d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1022b46e756fSKirill A. Shutemov 		goto out_nolock;
1023b46e756fSKirill A. Shutemov 	}
1024b46e756fSKirill A. Shutemov 
102550ad2f24SZach O'Keefe 	if (unmapped) {
1026b46e756fSKirill A. Shutemov 		/*
102750ad2f24SZach O'Keefe 		 * __collapse_huge_page_swapin will return with mmap_lock
102850ad2f24SZach O'Keefe 		 * released when it fails. So we jump out_nolock directly in
102950ad2f24SZach O'Keefe 		 * that case.  Continuing to collapse causes inconsistency.
1030b46e756fSKirill A. Shutemov 		 */
103150ad2f24SZach O'Keefe 		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
103250ad2f24SZach O'Keefe 						     referenced);
103350ad2f24SZach O'Keefe 		if (result != SCAN_SUCCEED)
1034b46e756fSKirill A. Shutemov 			goto out_nolock;
1035b46e756fSKirill A. Shutemov 	}
1036b46e756fSKirill A. Shutemov 
1037d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1038b46e756fSKirill A. Shutemov 	/*
1039b46e756fSKirill A. Shutemov 	 * Prevent all access to the page tables, with the exception of
1040b46e756fSKirill A. Shutemov 	 * gup_fast (handled later by pmdp_collapse_flush()) and rmap
1041b46e756fSKirill A. Shutemov 	 * walks from the VM (handled by the anon_vma lock + PG_lock).
1042b46e756fSKirill A. Shutemov 	 */
1043d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
104434488399SZach O'Keefe 	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
104550ad2f24SZach O'Keefe 	if (result != SCAN_SUCCEED)
104618d24a7cSMiaohe Lin 		goto out_up_write;
1047b46e756fSKirill A. Shutemov 	/* check if the pmd is still valid */
104850722804SZach O'Keefe 	result = check_pmd_still_valid(mm, address, pmd);
104950722804SZach O'Keefe 	if (result != SCAN_SUCCEED)
105018d24a7cSMiaohe Lin 		goto out_up_write;
1051b46e756fSKirill A. Shutemov 
1052b46e756fSKirill A. Shutemov 	anon_vma_lock_write(vma->anon_vma);
1053b46e756fSKirill A. Shutemov 
10547d4a8be0SAlistair Popple 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
10557d4a8be0SAlistair Popple 				address + HPAGE_PMD_SIZE);
1056ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
1057ec649c9dSVille Syrjälä 
1058ec649c9dSVille Syrjälä 	pte = pte_offset_map(pmd, address);
1059ec649c9dSVille Syrjälä 	pte_ptl = pte_lockptr(mm, pmd);
1060ec649c9dSVille Syrjälä 
1061b46e756fSKirill A. Shutemov 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1062b46e756fSKirill A. Shutemov 	/*
106370cbc3ccSYang Shi 	 * This removes any huge TLB entry from the CPU so that huge and
106470cbc3ccSYang Shi 	 * small TLB entries never coexist for the same virtual address,
106570cbc3ccSYang Shi 	 * avoiding the risk of CPU bugs in that area.
106670cbc3ccSYang Shi 	 *
106770cbc3ccSYang Shi 	 * Parallel fast GUP is fine since fast GUP will back off when
106870cbc3ccSYang Shi 	 * it detects PMD is changed.
1069b46e756fSKirill A. Shutemov 	 */
1070b46e756fSKirill A. Shutemov 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1071b46e756fSKirill A. Shutemov 	spin_unlock(pmd_ptl);
1072ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
10732ba99c5eSJann Horn 	tlb_remove_table_sync_one();
1074b46e756fSKirill A. Shutemov 
1075b46e756fSKirill A. Shutemov 	spin_lock(pte_ptl);
1076d8ea7cc8SZach O'Keefe 	result = __collapse_huge_page_isolate(vma, address, pte, cc,
10775503fbf2SKirill A. Shutemov 					      &compound_pagelist);
1078b46e756fSKirill A. Shutemov 	spin_unlock(pte_ptl);
1079b46e756fSKirill A. Shutemov 
108050ad2f24SZach O'Keefe 	if (unlikely(result != SCAN_SUCCEED)) {
1081b46e756fSKirill A. Shutemov 		pte_unmap(pte);
1082b46e756fSKirill A. Shutemov 		spin_lock(pmd_ptl);
1083b46e756fSKirill A. Shutemov 		BUG_ON(!pmd_none(*pmd));
1084b46e756fSKirill A. Shutemov 		/*
1085b46e756fSKirill A. Shutemov 		 * We can only use set_pmd_at when establishing
1086b46e756fSKirill A. Shutemov 		 * hugepmds and never for establishing regular pmds that
1087b46e756fSKirill A. Shutemov 		 * point to regular pagetables. Use pmd_populate for that
1088b46e756fSKirill A. Shutemov 		 */
1089b46e756fSKirill A. Shutemov 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1090b46e756fSKirill A. Shutemov 		spin_unlock(pmd_ptl);
1091b46e756fSKirill A. Shutemov 		anon_vma_unlock_write(vma->anon_vma);
109218d24a7cSMiaohe Lin 		goto out_up_write;
1093b46e756fSKirill A. Shutemov 	}
1094b46e756fSKirill A. Shutemov 
1095b46e756fSKirill A. Shutemov 	/*
1096b46e756fSKirill A. Shutemov 	 * All pages are isolated and locked so anon_vma rmap
1097b46e756fSKirill A. Shutemov 	 * can't run anymore.
1098b46e756fSKirill A. Shutemov 	 */
1099b46e756fSKirill A. Shutemov 	anon_vma_unlock_write(vma->anon_vma);
1100b46e756fSKirill A. Shutemov 
110150ad2f24SZach O'Keefe 	__collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
11025503fbf2SKirill A. Shutemov 				  &compound_pagelist);
1103b46e756fSKirill A. Shutemov 	pte_unmap(pte);
1104588d01f9SMiaohe Lin 	/*
1105588d01f9SMiaohe Lin 	 * spin_lock() below is not the equivalent of smp_wmb(), but
1106588d01f9SMiaohe Lin 	 * the smp_wmb() inside __SetPageUptodate() can be reused to
1107588d01f9SMiaohe Lin 	 * prevent the copy_huge_page writes from becoming visible after
1108588d01f9SMiaohe Lin 	 * the set_pmd_at() write.
1109588d01f9SMiaohe Lin 	 */
111050ad2f24SZach O'Keefe 	__SetPageUptodate(hpage);
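	/*
	 * The now-empty pte page previously hanging off this pmd is kept and
	 * deposited below, so it can be withdrawn again if the huge pmd is
	 * ever split.
	 */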
1111b46e756fSKirill A. Shutemov 	pgtable = pmd_pgtable(_pmd);
1112b46e756fSKirill A. Shutemov 
111350ad2f24SZach O'Keefe 	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1114f55e1014SLinus Torvalds 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1115b46e756fSKirill A. Shutemov 
1116b46e756fSKirill A. Shutemov 	spin_lock(pmd_ptl);
1117b46e756fSKirill A. Shutemov 	BUG_ON(!pmd_none(*pmd));
111850ad2f24SZach O'Keefe 	page_add_new_anon_rmap(hpage, vma, address);
111950ad2f24SZach O'Keefe 	lru_cache_add_inactive_or_unevictable(hpage, vma);
1120b46e756fSKirill A. Shutemov 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1121b46e756fSKirill A. Shutemov 	set_pmd_at(mm, address, pmd, _pmd);
1122b46e756fSKirill A. Shutemov 	update_mmu_cache_pmd(vma, address, pmd);
1123b46e756fSKirill A. Shutemov 	spin_unlock(pmd_ptl);
1124b46e756fSKirill A. Shutemov 
112550ad2f24SZach O'Keefe 	hpage = NULL;
1126b46e756fSKirill A. Shutemov 
1127b46e756fSKirill A. Shutemov 	result = SCAN_SUCCEED;
1128b46e756fSKirill A. Shutemov out_up_write:
1129d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1130b46e756fSKirill A. Shutemov out_nolock:
113150ad2f24SZach O'Keefe 	if (hpage) {
113250ad2f24SZach O'Keefe 		mem_cgroup_uncharge(page_folio(hpage));
113350ad2f24SZach O'Keefe 		put_page(hpage);
1134c6a7f445SYang Shi 	}
113550ad2f24SZach O'Keefe 	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
113650ad2f24SZach O'Keefe 	return result;
1137b46e756fSKirill A. Shutemov }
1138b46e756fSKirill A. Shutemov 
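/*
 * Scan the HPAGE_PMD_NR ptes under the pmd at @address and decide whether the
 * range is a suitable collapse candidate.  The max_ptes_none/_swap/_shared
 * limits are enforced only when cc->is_khugepaged; MADV_COLLAPSE ignores them.
 * On a successful scan this calls collapse_huge_page() directly and clears
 * *mmap_locked, since collapse_huge_page() returns with mmap_lock dropped.
 */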
11397d2c4385SZach O'Keefe static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1140b46e756fSKirill A. Shutemov 				   struct vm_area_struct *vma,
114150ad2f24SZach O'Keefe 				   unsigned long address, bool *mmap_locked,
114234d6b470SZach O'Keefe 				   struct collapse_control *cc)
1143b46e756fSKirill A. Shutemov {
1144b46e756fSKirill A. Shutemov 	pmd_t *pmd;
1145b46e756fSKirill A. Shutemov 	pte_t *pte, *_pte;
114650ad2f24SZach O'Keefe 	int result = SCAN_FAIL, referenced = 0;
114771a2c112SKirill A. Shutemov 	int none_or_zero = 0, shared = 0;
1148b46e756fSKirill A. Shutemov 	struct page *page = NULL;
1149b46e756fSKirill A. Shutemov 	unsigned long _address;
1150b46e756fSKirill A. Shutemov 	spinlock_t *ptl;
1151b46e756fSKirill A. Shutemov 	int node = NUMA_NO_NODE, unmapped = 0;
11520db501f7SEbru Akagunduz 	bool writable = false;
1153b46e756fSKirill A. Shutemov 
1154b46e756fSKirill A. Shutemov 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1155b46e756fSKirill A. Shutemov 
115650722804SZach O'Keefe 	result = find_pmd_or_thp_or_none(mm, address, &pmd);
115750722804SZach O'Keefe 	if (result != SCAN_SUCCEED)
1158b46e756fSKirill A. Shutemov 		goto out;
1159b46e756fSKirill A. Shutemov 
116034d6b470SZach O'Keefe 	memset(cc->node_load, 0, sizeof(cc->node_load));
1161e031ff96SYang Shi 	nodes_clear(cc->alloc_nmask);
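	/*
	 * Each mapped pte below votes for the NUMA node of its page via
	 * cc->node_load[]; alloc_charge_hpage() later allocates the huge page
	 * on the node that collected the most votes.
	 */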
1162b46e756fSKirill A. Shutemov 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1163b46e756fSKirill A. Shutemov 	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1164b46e756fSKirill A. Shutemov 	     _pte++, _address += PAGE_SIZE) {
1165b46e756fSKirill A. Shutemov 		pte_t pteval = *_pte;
1166b46e756fSKirill A. Shutemov 		if (is_swap_pte(pteval)) {
1167d8ea7cc8SZach O'Keefe 			++unmapped;
1168d8ea7cc8SZach O'Keefe 			if (!cc->is_khugepaged ||
1169d8ea7cc8SZach O'Keefe 			    unmapped <= khugepaged_max_ptes_swap) {
1170e1e267c7SPeter Xu 				/*
1171e1e267c7SPeter Xu 				 * Always be strict with uffd-wp
1172e1e267c7SPeter Xu 				 * enabled swap entries.  Please see
1173e1e267c7SPeter Xu 				 * comment below for pte_uffd_wp().
1174e1e267c7SPeter Xu 				 */
1175e1e267c7SPeter Xu 				if (pte_swp_uffd_wp(pteval)) {
1176e1e267c7SPeter Xu 					result = SCAN_PTE_UFFD_WP;
1177e1e267c7SPeter Xu 					goto out_unmap;
1178e1e267c7SPeter Xu 				}
1179b46e756fSKirill A. Shutemov 				continue;
1180b46e756fSKirill A. Shutemov 			} else {
1181b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_SWAP_PTE;
1182e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1183b46e756fSKirill A. Shutemov 				goto out_unmap;
1184b46e756fSKirill A. Shutemov 			}
1185b46e756fSKirill A. Shutemov 		}
1186b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1187d8ea7cc8SZach O'Keefe 			++none_or_zero;
1188b46e756fSKirill A. Shutemov 			if (!userfaultfd_armed(vma) &&
1189d8ea7cc8SZach O'Keefe 			    (!cc->is_khugepaged ||
1190d8ea7cc8SZach O'Keefe 			     none_or_zero <= khugepaged_max_ptes_none)) {
1191b46e756fSKirill A. Shutemov 				continue;
1192b46e756fSKirill A. Shutemov 			} else {
1193b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_NONE_PTE;
1194e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1195b46e756fSKirill A. Shutemov 				goto out_unmap;
1196b46e756fSKirill A. Shutemov 			}
1197b46e756fSKirill A. Shutemov 		}
1198e1e267c7SPeter Xu 		if (pte_uffd_wp(pteval)) {
1199e1e267c7SPeter Xu 			/*
1200e1e267c7SPeter Xu 			 * Don't collapse the page if any of the small
1201e1e267c7SPeter Xu 			 * PTEs are armed with uffd write protection.
1202e1e267c7SPeter Xu 			 * Here we can also mark the new huge pmd as
1203e1e267c7SPeter Xu 			 * write protected if any of the small ones is
12048958b249SHaitao Shi 			 * marked but that could bring unknown
1205e1e267c7SPeter Xu 			 * userfault messages that falls outside of
1206e1e267c7SPeter Xu 			 * the registered range.  So, just be simple.
1207e1e267c7SPeter Xu 			 */
1208e1e267c7SPeter Xu 			result = SCAN_PTE_UFFD_WP;
1209e1e267c7SPeter Xu 			goto out_unmap;
1210e1e267c7SPeter Xu 		}
1211b46e756fSKirill A. Shutemov 		if (pte_write(pteval))
1212b46e756fSKirill A. Shutemov 			writable = true;
1213b46e756fSKirill A. Shutemov 
1214b46e756fSKirill A. Shutemov 		page = vm_normal_page(vma, _address, pteval);
12153218f871SAlex Sierra 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1216b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_NULL;
1217b46e756fSKirill A. Shutemov 			goto out_unmap;
1218b46e756fSKirill A. Shutemov 		}
1219b46e756fSKirill A. Shutemov 
1220d8ea7cc8SZach O'Keefe 		if (page_mapcount(page) > 1) {
1221d8ea7cc8SZach O'Keefe 			++shared;
1222d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
1223d8ea7cc8SZach O'Keefe 			    shared > khugepaged_max_ptes_shared) {
122471a2c112SKirill A. Shutemov 				result = SCAN_EXCEED_SHARED_PTE;
1225e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
122671a2c112SKirill A. Shutemov 				goto out_unmap;
122771a2c112SKirill A. Shutemov 			}
1228d8ea7cc8SZach O'Keefe 		}
122971a2c112SKirill A. Shutemov 
12305503fbf2SKirill A. Shutemov 		page = compound_head(page);
1231b46e756fSKirill A. Shutemov 
1232b46e756fSKirill A. Shutemov 		/*
1233b46e756fSKirill A. Shutemov 		 * Record which node the original page is from and save this
123434d6b470SZach O'Keefe 		 * information to cc->node_load[].
12350b8f0d87SQuanfa Fu 		 * Khugepaged will allocate the hugepage from the node that
1236b46e756fSKirill A. Shutemov 		 * has the max hit count.
1237b46e756fSKirill A. Shutemov 		 */
1238b46e756fSKirill A. Shutemov 		node = page_to_nid(page);
12397d2c4385SZach O'Keefe 		if (hpage_collapse_scan_abort(node, cc)) {
1240b46e756fSKirill A. Shutemov 			result = SCAN_SCAN_ABORT;
1241b46e756fSKirill A. Shutemov 			goto out_unmap;
1242b46e756fSKirill A. Shutemov 		}
124334d6b470SZach O'Keefe 		cc->node_load[node]++;
1244b46e756fSKirill A. Shutemov 		if (!PageLRU(page)) {
1245b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LRU;
1246b46e756fSKirill A. Shutemov 			goto out_unmap;
1247b46e756fSKirill A. Shutemov 		}
1248b46e756fSKirill A. Shutemov 		if (PageLocked(page)) {
1249b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LOCK;
1250b46e756fSKirill A. Shutemov 			goto out_unmap;
1251b46e756fSKirill A. Shutemov 		}
1252b46e756fSKirill A. Shutemov 		if (!PageAnon(page)) {
1253b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_ANON;
1254b46e756fSKirill A. Shutemov 			goto out_unmap;
1255b46e756fSKirill A. Shutemov 		}
1256b46e756fSKirill A. Shutemov 
1257b46e756fSKirill A. Shutemov 		/*
12589445689fSKirill A. Shutemov 		 * Check if the page has any GUP (or other external) pins.
12599445689fSKirill A. Shutemov 		 *
1260cb67f428SHugh Dickins 		 * Here the check may be racy:
1261cb67f428SHugh Dickins 		 * it may see total_mapcount > refcount in some cases.
12629445689fSKirill A. Shutemov 		 * But such cases are ephemeral, so we can always retry the
12639445689fSKirill A. Shutemov 		 * collapse later.  However it may report a false positive if
12649445689fSKirill A. Shutemov 		 * the page has excessive GUP pins (i.e. 512).  Anyway the same
12659445689fSKirill A. Shutemov 		 * check will be done again later, so the risk seems low.
1266b46e756fSKirill A. Shutemov 		 */
12679445689fSKirill A. Shutemov 		if (!is_refcount_suitable(page)) {
1268b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
1269b46e756fSKirill A. Shutemov 			goto out_unmap;
1270b46e756fSKirill A. Shutemov 		}
1271d8ea7cc8SZach O'Keefe 
1272d8ea7cc8SZach O'Keefe 		/*
1273d8ea7cc8SZach O'Keefe 		 * If collapse was initiated by khugepaged, check that there is
1274d8ea7cc8SZach O'Keefe 		 * enough young ptes to justify collapsing the page
1275d8ea7cc8SZach O'Keefe 		 */
1276d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
1277d8ea7cc8SZach O'Keefe 		    (pte_young(pteval) || page_is_young(page) ||
1278d8ea7cc8SZach O'Keefe 		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1279d8ea7cc8SZach O'Keefe 								     address)))
12800db501f7SEbru Akagunduz 			referenced++;
1281b46e756fSKirill A. Shutemov 	}
1282ffe945e6SKirill A. Shutemov 	if (!writable) {
1283ffe945e6SKirill A. Shutemov 		result = SCAN_PAGE_RO;
1284d8ea7cc8SZach O'Keefe 	} else if (cc->is_khugepaged &&
1285d8ea7cc8SZach O'Keefe 		   (!referenced ||
1286d8ea7cc8SZach O'Keefe 		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1287ffe945e6SKirill A. Shutemov 		result = SCAN_LACK_REFERENCED_PAGE;
1288ffe945e6SKirill A. Shutemov 	} else {
1289b46e756fSKirill A. Shutemov 		result = SCAN_SUCCEED;
1290b46e756fSKirill A. Shutemov 	}
1291b46e756fSKirill A. Shutemov out_unmap:
1292b46e756fSKirill A. Shutemov 	pte_unmap_unlock(pte, ptl);
129350ad2f24SZach O'Keefe 	if (result == SCAN_SUCCEED) {
129450ad2f24SZach O'Keefe 		result = collapse_huge_page(mm, address, referenced,
129550ad2f24SZach O'Keefe 					    unmapped, cc);
1296c1e8d7c6SMichel Lespinasse 		/* collapse_huge_page will return with the mmap_lock released */
129750ad2f24SZach O'Keefe 		*mmap_locked = false;
1298b46e756fSKirill A. Shutemov 	}
1299b46e756fSKirill A. Shutemov out:
1300b46e756fSKirill A. Shutemov 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1301b46e756fSKirill A. Shutemov 				     none_or_zero, result, unmapped);
130250ad2f24SZach O'Keefe 	return result;
1303b46e756fSKirill A. Shutemov }
1304b46e756fSKirill A. Shutemov 
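/*
 * Drop an mm from the khugepaged scan list once it has exited.  Called with
 * khugepaged_mm_lock held, as the lockdep assertion below documents.
 */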
1305b26e2701SQi Zheng static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1306b46e756fSKirill A. Shutemov {
1307b26e2701SQi Zheng 	struct mm_slot *slot = &mm_slot->slot;
1308b26e2701SQi Zheng 	struct mm_struct *mm = slot->mm;
1309b46e756fSKirill A. Shutemov 
131035f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
1311b46e756fSKirill A. Shutemov 
13127d2c4385SZach O'Keefe 	if (hpage_collapse_test_exit(mm)) {
1313b46e756fSKirill A. Shutemov 		/* free mm_slot */
1314b26e2701SQi Zheng 		hash_del(&slot->hash);
1315b26e2701SQi Zheng 		list_del(&slot->mm_node);
1316b46e756fSKirill A. Shutemov 
1317b46e756fSKirill A. Shutemov 		/*
1318b46e756fSKirill A. Shutemov 		 * Not strictly needed because the mm exited already.
1319b46e756fSKirill A. Shutemov 		 *
1320b46e756fSKirill A. Shutemov 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1321b46e756fSKirill A. Shutemov 		 */
1322b46e756fSKirill A. Shutemov 
1323b46e756fSKirill A. Shutemov 		/* khugepaged_mm_lock actually not necessary for the below */
1324b26e2701SQi Zheng 		mm_slot_free(mm_slot_cache, mm_slot);
1325b46e756fSKirill A. Shutemov 		mmdrop(mm);
1326b46e756fSKirill A. Shutemov 	}
1327b46e756fSKirill A. Shutemov }
1328b46e756fSKirill A. Shutemov 
1329396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM
133027e1f827SSong Liu /*
133127e1f827SSong Liu  * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
133227e1f827SSong Liu  * khugepaged should try to collapse the page table.
133334488399SZach O'Keefe  *
133434488399SZach O'Keefe  * Note that following race exists:
133534488399SZach O'Keefe  * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
133634488399SZach O'Keefe  *     emptying A's ->pte_mapped_thp[] array.
133734488399SZach O'Keefe  * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
133834488399SZach O'Keefe  *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
133934488399SZach O'Keefe  *     (at virtual address X) and adds an entry (for X) into mm_struct A's
134034488399SZach O'Keefe  *     ->pte_mapped_thp[] array.
134134488399SZach O'Keefe  * (3) khugepaged calls hpage_collapse_scan_file() for mm_struct A at X,
134234488399SZach O'Keefe  *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
134334488399SZach O'Keefe  *     (for X) into mm_struct A's ->pte_mapped_thp[] array.
134434488399SZach O'Keefe  * Thus, it's possible the same address is added multiple times for the same
134534488399SZach O'Keefe  * mm_struct.  Should this happen, we'll simply attempt
134634488399SZach O'Keefe  * collapse_pte_mapped_thp() multiple times for the same address, under the same
134734488399SZach O'Keefe  * exclusive mmap_lock, and assuming the first call is successful, subsequent
134834488399SZach O'Keefe  * attempts will return quickly (without grabbing any additional locks) when
134934488399SZach O'Keefe  * a huge pmd is found in find_pmd_or_thp_or_none().  Since this is a cheap
135034488399SZach O'Keefe  * check, and since this is a rare occurrence, the cost of preventing this
135134488399SZach O'Keefe  * "multiple-add" is thought to be more expensive than just handling it, should
135234488399SZach O'Keefe  * it occur.
135327e1f827SSong Liu  */
135458ac9a89SZach O'Keefe static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
135527e1f827SSong Liu 					  unsigned long addr)
135627e1f827SSong Liu {
1357b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
1358b26e2701SQi Zheng 	struct mm_slot *slot;
135958ac9a89SZach O'Keefe 	bool ret = false;
136027e1f827SSong Liu 
136127e1f827SSong Liu 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
136227e1f827SSong Liu 
136327e1f827SSong Liu 	spin_lock(&khugepaged_mm_lock);
1364b26e2701SQi Zheng 	slot = mm_slot_lookup(mm_slots_hash, mm);
1365b26e2701SQi Zheng 	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
136658ac9a89SZach O'Keefe 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
136727e1f827SSong Liu 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
136858ac9a89SZach O'Keefe 		ret = true;
136958ac9a89SZach O'Keefe 	}
137027e1f827SSong Liu 	spin_unlock(&khugepaged_mm_lock);
137158ac9a89SZach O'Keefe 	return ret;
137227e1f827SSong Liu }
137327e1f827SSong Liu 
137434488399SZach O'Keefe /* hpage must be locked, and mmap_lock must be held in write */
137534488399SZach O'Keefe static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
137634488399SZach O'Keefe 			pmd_t *pmdp, struct page *hpage)
137734488399SZach O'Keefe {
137834488399SZach O'Keefe 	struct vm_fault vmf = {
137934488399SZach O'Keefe 		.vma = vma,
138034488399SZach O'Keefe 		.address = addr,
138134488399SZach O'Keefe 		.flags = 0,
138234488399SZach O'Keefe 		.pmd = pmdp,
138334488399SZach O'Keefe 	};
138434488399SZach O'Keefe 
138534488399SZach O'Keefe 	VM_BUG_ON(!PageTransHuge(hpage));
138634488399SZach O'Keefe 	mmap_assert_write_locked(vma->vm_mm);
138734488399SZach O'Keefe 
138834488399SZach O'Keefe 	if (do_set_pmd(&vmf, hpage))
138934488399SZach O'Keefe 		return SCAN_FAIL;
139034488399SZach O'Keefe 
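	/*
	 * do_set_pmd() does not take a page reference of its own, so grab one
	 * here on behalf of the new pmd mapping; the caller keeps (and later
	 * drops) its own reference on hpage.
	 */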
139134488399SZach O'Keefe 	get_page(hpage);
139234488399SZach O'Keefe 	return SCAN_SUCCEED;
139327e1f827SSong Liu }
139427e1f827SSong Liu 
13958d3c106eSJann Horn /*
13968d3c106eSJann Horn  * A note about locking:
13978d3c106eSJann Horn  * Trying to take the page table spinlocks would be useless here because those
13988d3c106eSJann Horn  * are only used to synchronize:
13998d3c106eSJann Horn  *
14008d3c106eSJann Horn  *  - modifying terminal entries (ones that point to a data page, not to another
14018d3c106eSJann Horn  *    page table)
14028d3c106eSJann Horn  *  - installing *new* non-terminal entries
14038d3c106eSJann Horn  *
14048d3c106eSJann Horn  * Instead, we need roughly the same kind of protection as free_pgtables() or
14058d3c106eSJann Horn  * mm_take_all_locks() (but only for a single VMA):
14068d3c106eSJann Horn  * The mmap lock together with this VMA's rmap locks covers all paths towards
14078d3c106eSJann Horn  * the page table entries we're messing with here, except for hardware page
14088d3c106eSJann Horn  * table walks and lockless_pages_from_mm().
14098d3c106eSJann Horn  */
1410e59a47b8SPasha Tatashin static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1411e59a47b8SPasha Tatashin 				  unsigned long addr, pmd_t *pmdp)
1412e59a47b8SPasha Tatashin {
1413e59a47b8SPasha Tatashin 	pmd_t pmd;
1414f268f6cfSJann Horn 	struct mmu_notifier_range range;
1415e59a47b8SPasha Tatashin 
141680110bbfSPasha Tatashin 	mmap_assert_write_locked(mm);
14178d3c106eSJann Horn 	if (vma->vm_file)
14188d3c106eSJann Horn 		lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
14198d3c106eSJann Horn 	/*
14208d3c106eSJann Horn 	 * All anon_vmas attached to the VMA have the same root and are
14218d3c106eSJann Horn 	 * therefore locked by the same lock.
14228d3c106eSJann Horn 	 */
14238d3c106eSJann Horn 	if (vma->anon_vma)
14248d3c106eSJann Horn 		lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
14258d3c106eSJann Horn 
14267d4a8be0SAlistair Popple 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
1427f268f6cfSJann Horn 				addr + HPAGE_PMD_SIZE);
1428f268f6cfSJann Horn 	mmu_notifier_invalidate_range_start(&range);
1429e59a47b8SPasha Tatashin 	pmd = pmdp_collapse_flush(vma, addr, pmdp);
14302ba99c5eSJann Horn 	tlb_remove_table_sync_one();
1431f268f6cfSJann Horn 	mmu_notifier_invalidate_range_end(&range);
1432e59a47b8SPasha Tatashin 	mm_dec_nr_ptes(mm);
143380110bbfSPasha Tatashin 	page_table_check_pte_clear_range(mm, addr, pmd);
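	/*
	 * The pmd was cleared and the TLB flushed above, and GUP-fast has been
	 * synchronized against, so no walker can reach the old pte page any
	 * more and it can be freed.
	 */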
1434e59a47b8SPasha Tatashin 	pte_free(mm, pmd_pgtable(pmd));
1435e59a47b8SPasha Tatashin }
1436e59a47b8SPasha Tatashin 
143727e1f827SSong Liu /**
1438336e6b53SAlex Shi  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1439336e6b53SAlex Shi  * address haddr.
1440336e6b53SAlex Shi  *
1441336e6b53SAlex Shi  * @mm: process address space where collapse happens
1442336e6b53SAlex Shi  * @addr: THP collapse address
144334488399SZach O'Keefe  * @install_pmd: If a huge PMD should be installed
144427e1f827SSong Liu  *
144527e1f827SSong Liu  * This function checks whether all the PTEs in the PMD are pointing to the
144627e1f827SSong Liu  * right THP. If so, retract the page table so the THP can refault in
144734488399SZach O'Keefe  * as pmd-mapped. Possibly install a huge PMD mapping the THP.
144827e1f827SSong Liu  */
144934488399SZach O'Keefe int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
145034488399SZach O'Keefe 			    bool install_pmd)
145127e1f827SSong Liu {
145227e1f827SSong Liu 	unsigned long haddr = addr & HPAGE_PMD_MASK;
145394d815b2SLiam R. Howlett 	struct vm_area_struct *vma = vma_lookup(mm, haddr);
1454119a5fc1SHugh Dickins 	struct page *hpage;
145527e1f827SSong Liu 	pte_t *start_pte, *pte;
1456e59a47b8SPasha Tatashin 	pmd_t *pmd;
145727e1f827SSong Liu 	spinlock_t *ptl;
145858ac9a89SZach O'Keefe 	int count = 0, result = SCAN_FAIL;
145927e1f827SSong Liu 	int i;
146027e1f827SSong Liu 
146158ac9a89SZach O'Keefe 	mmap_assert_write_locked(mm);
146258ac9a89SZach O'Keefe 
146334488399SZach O'Keefe 	/* Fast check before locking the page to see if it is already PMD-mapped */
146458ac9a89SZach O'Keefe 	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
146534488399SZach O'Keefe 	if (result == SCAN_PMD_MAPPED)
146634488399SZach O'Keefe 		return result;
146758ac9a89SZach O'Keefe 
146827e1f827SSong Liu 	if (!vma || !vma->vm_file ||
1469fef792a4SMiaohe Lin 	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
147034488399SZach O'Keefe 		return SCAN_VMA_CHECK;
147127e1f827SSong Liu 
147227e1f827SSong Liu 	/*
1473a7f4e6e4SZach O'Keefe 	 * If we are here, we've succeeded in replacing all the native pages
1474a7f4e6e4SZach O'Keefe 	 * in the page cache with a single hugepage. If a mm were to fault-in
1475a7f4e6e4SZach O'Keefe 	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1476a7f4e6e4SZach O'Keefe 	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1477a7f4e6e4SZach O'Keefe 	 * analogously elide sysfs THP settings here.
147827e1f827SSong Liu 	 */
1479a7f4e6e4SZach O'Keefe 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
148034488399SZach O'Keefe 		return SCAN_VMA_CHECK;
148127e1f827SSong Liu 
1482deb4c93aSPeter Xu 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1483deb4c93aSPeter Xu 	if (userfaultfd_wp(vma))
148434488399SZach O'Keefe 		return SCAN_PTE_UFFD_WP;
1485deb4c93aSPeter Xu 
1486119a5fc1SHugh Dickins 	hpage = find_lock_page(vma->vm_file->f_mapping,
1487119a5fc1SHugh Dickins 			       linear_page_index(vma, haddr));
1488119a5fc1SHugh Dickins 	if (!hpage)
148934488399SZach O'Keefe 		return SCAN_PAGE_NULL;
1490119a5fc1SHugh Dickins 
149134488399SZach O'Keefe 	if (!PageHead(hpage)) {
149234488399SZach O'Keefe 		result = SCAN_FAIL;
1493119a5fc1SHugh Dickins 		goto drop_hpage;
149434488399SZach O'Keefe 	}
1495119a5fc1SHugh Dickins 
149634488399SZach O'Keefe 	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
149734488399SZach O'Keefe 		result = SCAN_PAGE_COMPOUND;
1498119a5fc1SHugh Dickins 		goto drop_hpage;
149934488399SZach O'Keefe 	}
1500780a4b6fSZach O'Keefe 
150134488399SZach O'Keefe 	switch (result) {
150234488399SZach O'Keefe 	case SCAN_SUCCEED:
150334488399SZach O'Keefe 		break;
150434488399SZach O'Keefe 	case SCAN_PMD_NONE:
150534488399SZach O'Keefe 		/*
150634488399SZach O'Keefe 		 * In the MADV_COLLAPSE path, there is a possible race with khugepaged where
150734488399SZach O'Keefe 		 * all pte entries have been removed and pmd cleared.  If so,
150834488399SZach O'Keefe 		 * skip all the pte checks and just update the pmd mapping.
150934488399SZach O'Keefe 		 */
151034488399SZach O'Keefe 		goto maybe_install_pmd;
151134488399SZach O'Keefe 	default:
151227e1f827SSong Liu 		goto drop_hpage;
151334488399SZach O'Keefe 	}
151427e1f827SSong Liu 
15158d3c106eSJann Horn 	/*
15168d3c106eSJann Horn 	 * We need to lock the mapping so that from here on, only GUP-fast and
15178d3c106eSJann Horn 	 * hardware page walks can access the parts of the page tables that
15188d3c106eSJann Horn 	 * we're operating on.
15198d3c106eSJann Horn 	 * See collapse_and_free_pmd().
15208d3c106eSJann Horn 	 */
15218d3c106eSJann Horn 	i_mmap_lock_write(vma->vm_file->f_mapping);
15228d3c106eSJann Horn 
15238d3c106eSJann Horn 	/*
15248d3c106eSJann Horn 	 * This spinlock should be unnecessary: Nobody else should be accessing
15258d3c106eSJann Horn 	 * the page tables under spinlock protection here, only
15268d3c106eSJann Horn 	 * lockless_pages_from_mm() and the hardware page walker can access page
15278d3c106eSJann Horn 	 * tables while all the high-level locks are held in write mode.
15288d3c106eSJann Horn 	 */
152927e1f827SSong Liu 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
153034488399SZach O'Keefe 	result = SCAN_FAIL;
153127e1f827SSong Liu 
153227e1f827SSong Liu 	/* step 1: check all mapped PTEs are to the right huge page */
153327e1f827SSong Liu 	for (i = 0, addr = haddr, pte = start_pte;
153427e1f827SSong Liu 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
153527e1f827SSong Liu 		struct page *page;
153627e1f827SSong Liu 
153727e1f827SSong Liu 		/* empty pte, skip */
153827e1f827SSong Liu 		if (pte_none(*pte))
153927e1f827SSong Liu 			continue;
154027e1f827SSong Liu 
154127e1f827SSong Liu 		/* page swapped out, abort */
154234488399SZach O'Keefe 		if (!pte_present(*pte)) {
154334488399SZach O'Keefe 			result = SCAN_PTE_NON_PRESENT;
154427e1f827SSong Liu 			goto abort;
154534488399SZach O'Keefe 		}
154627e1f827SSong Liu 
154727e1f827SSong Liu 		page = vm_normal_page(vma, addr, *pte);
15483218f871SAlex Sierra 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
15493218f871SAlex Sierra 			page = NULL;
155027e1f827SSong Liu 		/*
1551119a5fc1SHugh Dickins 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1552119a5fc1SHugh Dickins 		 * page table, but the new page will not be a subpage of hpage.
155327e1f827SSong Liu 		 */
1554119a5fc1SHugh Dickins 		if (hpage + i != page)
155527e1f827SSong Liu 			goto abort;
155627e1f827SSong Liu 		count++;
155727e1f827SSong Liu 	}
155827e1f827SSong Liu 
155927e1f827SSong Liu 	/* step 2: adjust rmap */
156027e1f827SSong Liu 	for (i = 0, addr = haddr, pte = start_pte;
156127e1f827SSong Liu 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
156227e1f827SSong Liu 		struct page *page;
156327e1f827SSong Liu 
156427e1f827SSong Liu 		if (pte_none(*pte))
156527e1f827SSong Liu 			continue;
156627e1f827SSong Liu 		page = vm_normal_page(vma, addr, *pte);
15673218f871SAlex Sierra 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
15683218f871SAlex Sierra 			goto abort;
1569cea86fe2SHugh Dickins 		page_remove_rmap(page, vma, false);
157027e1f827SSong Liu 	}
157127e1f827SSong Liu 
157227e1f827SSong Liu 	pte_unmap_unlock(start_pte, ptl);
157327e1f827SSong Liu 
157427e1f827SSong Liu 	/* step 3: set proper refcount and mm_counters. */
1575119a5fc1SHugh Dickins 	if (count) {
157627e1f827SSong Liu 		page_ref_sub(hpage, count);
157727e1f827SSong Liu 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
157827e1f827SSong Liu 	}
157927e1f827SSong Liu 
158034488399SZach O'Keefe 	/* step 4: remove pte entries */
1581ab0c3f12SHugh Dickins 	/* we make no change to anon, but protect concurrent anon page lookup */
1582ab0c3f12SHugh Dickins 	if (vma->anon_vma)
1583ab0c3f12SHugh Dickins 		anon_vma_lock_write(vma->anon_vma);
1584ab0c3f12SHugh Dickins 
1585e59a47b8SPasha Tatashin 	collapse_and_free_pmd(mm, vma, haddr, pmd);
158634488399SZach O'Keefe 
1587ab0c3f12SHugh Dickins 	if (vma->anon_vma)
1588ab0c3f12SHugh Dickins 		anon_vma_unlock_write(vma->anon_vma);
15898d3c106eSJann Horn 	i_mmap_unlock_write(vma->vm_file->f_mapping);
15908d3c106eSJann Horn 
159134488399SZach O'Keefe maybe_install_pmd:
159234488399SZach O'Keefe 	/* step 5: install pmd entry */
159334488399SZach O'Keefe 	result = install_pmd
159434488399SZach O'Keefe 			? set_huge_pmd(vma, haddr, pmd, hpage)
159534488399SZach O'Keefe 			: SCAN_SUCCEED;
159634488399SZach O'Keefe 
1597119a5fc1SHugh Dickins drop_hpage:
1598119a5fc1SHugh Dickins 	unlock_page(hpage);
1599119a5fc1SHugh Dickins 	put_page(hpage);
160034488399SZach O'Keefe 	return result;
160127e1f827SSong Liu 
160227e1f827SSong Liu abort:
160327e1f827SSong Liu 	pte_unmap_unlock(start_pte, ptl);
16048d3c106eSJann Horn 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1605119a5fc1SHugh Dickins 	goto drop_hpage;
160627e1f827SSong Liu }
160727e1f827SSong Liu 
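/*
 * A minimal caller-side sketch, modelled on the MADV_COLLAPSE path later in
 * this file (illustrative only; the locking shown matches the
 * mmap_assert_write_locked() above):
 *
 *	mmap_write_lock(mm);
 *	result = collapse_pte_mapped_thp(mm, addr, true);
 *	mmap_write_unlock(mm);
 */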
1608b26e2701SQi Zheng static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
160927e1f827SSong Liu {
1610b26e2701SQi Zheng 	struct mm_slot *slot = &mm_slot->slot;
1611b26e2701SQi Zheng 	struct mm_struct *mm = slot->mm;
161227e1f827SSong Liu 	int i;
161327e1f827SSong Liu 
161427e1f827SSong Liu 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
16150edf61e5SMiaohe Lin 		return;
161627e1f827SSong Liu 
1617d8ed45c5SMichel Lespinasse 	if (!mmap_write_trylock(mm))
16180edf61e5SMiaohe Lin 		return;
161927e1f827SSong Liu 
16207d2c4385SZach O'Keefe 	if (unlikely(hpage_collapse_test_exit(mm)))
162127e1f827SSong Liu 		goto out;
162227e1f827SSong Liu 
162327e1f827SSong Liu 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
162434488399SZach O'Keefe 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
162527e1f827SSong Liu 
162627e1f827SSong Liu out:
162727e1f827SSong Liu 	mm_slot->nr_pte_mapped_thp = 0;
1628d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
162927e1f827SSong Liu }
163027e1f827SSong Liu 
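/*
 * Walk every VMA mapping @pgoff of @mapping and try to retract the
 * now-redundant pte-based page table under it.  A huge pmd is installed
 * directly only for MADV_COLLAPSE's target mm/addr pair; for other VMAs the
 * pmd is either collapsed in place (when mmap_lock can be trylocked) or the
 * address is queued via khugepaged_add_pte_mapped_thp() for a later retry.
 */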
163134488399SZach O'Keefe static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
163234488399SZach O'Keefe 			       struct mm_struct *target_mm,
163334488399SZach O'Keefe 			       unsigned long target_addr, struct page *hpage,
163434488399SZach O'Keefe 			       struct collapse_control *cc)
1635f3f0e1d2SKirill A. Shutemov {
1636f3f0e1d2SKirill A. Shutemov 	struct vm_area_struct *vma;
163734488399SZach O'Keefe 	int target_result = SCAN_FAIL;
1638f3f0e1d2SKirill A. Shutemov 
1639f3f0e1d2SKirill A. Shutemov 	i_mmap_lock_write(mapping);
1640f3f0e1d2SKirill A. Shutemov 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
164134488399SZach O'Keefe 		int result = SCAN_FAIL;
164234488399SZach O'Keefe 		struct mm_struct *mm = NULL;
164334488399SZach O'Keefe 		unsigned long addr = 0;
164434488399SZach O'Keefe 		pmd_t *pmd;
164534488399SZach O'Keefe 		bool is_target = false;
164634488399SZach O'Keefe 
164727e1f827SSong Liu 		/*
164827e1f827SSong Liu 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
164927e1f827SSong Liu 		 * got written to. These VMAs are likely not worth the cost of
16503e4e28c5SMichel Lespinasse 		 * mmap_write_lock(mm), as the PMD-mapping is likely to be
165127e1f827SSong Liu 		 * split later.
165227e1f827SSong Liu 		 *
165336ee2c78SMiaohe Lin 		 * Note that the vma->anon_vma check is racy: it can be set up
1654c1e8d7c6SMichel Lespinasse 		 * after the check but before we take mmap_lock in the fault
165527e1f827SSong Liu 		 * path.  But the page lock would prevent establishing any new
165627e1f827SSong Liu 		 * ptes of the page, so we are safe.
165727e1f827SSong Liu 		 *
165827e1f827SSong Liu 		 * An alternative would be to drop the check, but to check that
165927e1f827SSong Liu 		 * the page table is clear before calling pmdp_collapse_flush()
166027e1f827SSong Liu 		 * under ptl.  That would have a higher chance to recover a THP
16618d3c106eSJann Horn 		 * for the VMA, but also a higher cost.  It would also probably
16628d3c106eSJann Horn 		 * require locking the anon_vma.
166327e1f827SSong Liu 		 */
1664023f47a8SJann Horn 		if (READ_ONCE(vma->anon_vma)) {
166534488399SZach O'Keefe 			result = SCAN_PAGE_ANON;
166634488399SZach O'Keefe 			goto next;
166734488399SZach O'Keefe 		}
1668f3f0e1d2SKirill A. Shutemov 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
166934488399SZach O'Keefe 		if (addr & ~HPAGE_PMD_MASK ||
167034488399SZach O'Keefe 		    vma->vm_end < addr + HPAGE_PMD_SIZE) {
167134488399SZach O'Keefe 			result = SCAN_VMA_CHECK;
167234488399SZach O'Keefe 			goto next;
167334488399SZach O'Keefe 		}
167418e77600SHugh Dickins 		mm = vma->vm_mm;
167534488399SZach O'Keefe 		is_target = mm == target_mm && addr == target_addr;
167634488399SZach O'Keefe 		result = find_pmd_or_thp_or_none(mm, addr, &pmd);
167734488399SZach O'Keefe 		if (result != SCAN_SUCCEED)
167834488399SZach O'Keefe 			goto next;
1679f3f0e1d2SKirill A. Shutemov 		/*
1680c1e8d7c6SMichel Lespinasse 		 * We need exclusive mmap_lock to retract page table.
168127e1f827SSong Liu 		 *
168227e1f827SSong Liu 		 * We use trylock due to lock inversion: we need to acquire
1683c1e8d7c6SMichel Lespinasse 		 * mmap_lock while holding page lock. Fault path does it in
168427e1f827SSong Liu 		 * reverse order. Trylock is a way to avoid deadlock.
168534488399SZach O'Keefe 		 *
168634488399SZach O'Keefe 		 * Also, it's not MADV_COLLAPSE's job to collapse other
168734488399SZach O'Keefe 		 * mappings - let khugepaged take care of them later.
1688f3f0e1d2SKirill A. Shutemov 		 */
168934488399SZach O'Keefe 		result = SCAN_PTE_MAPPED_HUGEPAGE;
169034488399SZach O'Keefe 		if ((cc->is_khugepaged || is_target) &&
169134488399SZach O'Keefe 		    mmap_write_trylock(mm)) {
1692deb4c93aSPeter Xu 			/*
1693023f47a8SJann Horn 			 * Re-check whether we have an ->anon_vma, because
1694023f47a8SJann Horn 			 * collapse_and_free_pmd() requires that either no
1695023f47a8SJann Horn 			 * ->anon_vma exists or the anon_vma is locked.
1696023f47a8SJann Horn 			 * We already checked ->anon_vma above, but that check
1697023f47a8SJann Horn 			 * is racy because ->anon_vma can be populated under the
1698023f47a8SJann Horn 			 * mmap lock in read mode.
1699023f47a8SJann Horn 			 */
1700023f47a8SJann Horn 			if (vma->anon_vma) {
1701023f47a8SJann Horn 				result = SCAN_PAGE_ANON;
1702023f47a8SJann Horn 				goto unlock_next;
1703023f47a8SJann Horn 			}
1704023f47a8SJann Horn 			/*
1705deb4c93aSPeter Xu 			 * When a vma is registered with uffd-wp, we can't
1706deb4c93aSPeter Xu 			 * recycle the pmd pgtable because there can be pte
1707deb4c93aSPeter Xu 			 * markers installed.  Just skip this vma, so the rest
1708deb4c93aSPeter Xu 			 * of the mm/vmas can still have the same file mapped
1709deb4c93aSPeter Xu 			 * hugely; however, it will always be mapped with small
1710deb4c93aSPeter Xu 			 * pages for uffd-wp registered ranges.
1711deb4c93aSPeter Xu 			 */
171234488399SZach O'Keefe 			if (hpage_collapse_test_exit(mm)) {
171334488399SZach O'Keefe 				result = SCAN_ANY_PROCESS;
171434488399SZach O'Keefe 				goto unlock_next;
1715f3f0e1d2SKirill A. Shutemov 			}
171634488399SZach O'Keefe 			if (userfaultfd_wp(vma)) {
171734488399SZach O'Keefe 				result = SCAN_PTE_UFFD_WP;
171834488399SZach O'Keefe 				goto unlock_next;
171934488399SZach O'Keefe 			}
172034488399SZach O'Keefe 			collapse_and_free_pmd(mm, vma, addr, pmd);
172134488399SZach O'Keefe 			if (!cc->is_khugepaged && is_target)
172234488399SZach O'Keefe 				result = set_huge_pmd(vma, addr, pmd, hpage);
172334488399SZach O'Keefe 			else
172434488399SZach O'Keefe 				result = SCAN_SUCCEED;
172534488399SZach O'Keefe 
172634488399SZach O'Keefe unlock_next:
172734488399SZach O'Keefe 			mmap_write_unlock(mm);
172834488399SZach O'Keefe 			goto next;
172934488399SZach O'Keefe 		}
173034488399SZach O'Keefe 		/*
173134488399SZach O'Keefe 		 * Calling context will handle target mm/addr. Otherwise, let
173234488399SZach O'Keefe 		 * khugepaged try again later.
173334488399SZach O'Keefe 		 */
173434488399SZach O'Keefe 		if (!is_target) {
173534488399SZach O'Keefe 			khugepaged_add_pte_mapped_thp(mm, addr);
173634488399SZach O'Keefe 			continue;
173734488399SZach O'Keefe 		}
173834488399SZach O'Keefe next:
173934488399SZach O'Keefe 		if (is_target)
174034488399SZach O'Keefe 			target_result = result;
1741f3f0e1d2SKirill A. Shutemov 	}
1742f3f0e1d2SKirill A. Shutemov 	i_mmap_unlock_write(mapping);
174334488399SZach O'Keefe 	return target_result;
1744f3f0e1d2SKirill A. Shutemov }
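/*
 * A sketch of how collapse_file() is reached, for orientation (the actual
 * call site lives in hpage_collapse_scan_file() later in this file; error
 * handling and locking are elided here):
 *
 *	result = collapse_file(mm, addr, file, start, cc);
 */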
1745f3f0e1d2SKirill A. Shutemov 
1746f3f0e1d2SKirill A. Shutemov /**
174799cb0dbdSSong Liu  * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1748f3f0e1d2SKirill A. Shutemov  *
1749336e6b53SAlex Shi  * @mm: process address space where collapse happens
175034488399SZach O'Keefe  * @addr: virtual collapse start address
1751336e6b53SAlex Shi  * @file: file that the collapse happens on
1752336e6b53SAlex Shi  * @start: collapse start page index within @file
17539710a78aSZach O'Keefe  * @cc: collapse context and scratchpad
1754336e6b53SAlex Shi  *
1755f3f0e1d2SKirill A. Shutemov  * Basic scheme is simple, details are more complex:
175687c460a0SHugh Dickins  *  - allocate and lock a new huge page;
175777da9389SMatthew Wilcox  *  - scan page cache replacing old pages with the new one
175899cb0dbdSSong Liu  *    + swap/gup in pages if necessary;
1759f3f0e1d2SKirill A. Shutemov  *    + fill in gaps;
176077da9389SMatthew Wilcox  *    + keep old pages around in case rollback is required;
176177da9389SMatthew Wilcox  *  - if replacing succeeds:
1762f3f0e1d2SKirill A. Shutemov  *    + copy data over;
1763f3f0e1d2SKirill A. Shutemov  *    + free old pages;
176487c460a0SHugh Dickins  *    + unlock huge page;
1765f3f0e1d2SKirill A. Shutemov  *  - if replacing failed:
1766f3f0e1d2SKirill A. Shutemov  *    + put all pages back and unfreeze them;
176777da9389SMatthew Wilcox  *    + restore gaps in the page cache;
176887c460a0SHugh Dickins  *    + unlock and free huge page;
1769f3f0e1d2SKirill A. Shutemov  */
177034488399SZach O'Keefe static int collapse_file(struct mm_struct *mm, unsigned long addr,
1771579c571eSSong Liu 			 struct file *file, pgoff_t start,
177234488399SZach O'Keefe 			 struct collapse_control *cc)
1773f3f0e1d2SKirill A. Shutemov {
1774579c571eSSong Liu 	struct address_space *mapping = file->f_mapping;
177550ad2f24SZach O'Keefe 	struct page *hpage;
17764c9473e8SGautam Menghani 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1777f3f0e1d2SKirill A. Shutemov 	LIST_HEAD(pagelist);
177877da9389SMatthew Wilcox 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1779f3f0e1d2SKirill A. Shutemov 	int nr_none = 0, result = SCAN_SUCCEED;
178099cb0dbdSSong Liu 	bool is_shmem = shmem_file(file);
17814c9473e8SGautam Menghani 	int nr = 0;
1782f3f0e1d2SKirill A. Shutemov 
178399cb0dbdSSong Liu 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1784f3f0e1d2SKirill A. Shutemov 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1785f3f0e1d2SKirill A. Shutemov 
178650ad2f24SZach O'Keefe 	result = alloc_charge_hpage(&hpage, mm, cc);
17879710a78aSZach O'Keefe 	if (result != SCAN_SUCCEED)
1788f3f0e1d2SKirill A. Shutemov 		goto out;
1789f3f0e1d2SKirill A. Shutemov 
17906b24ca4aSMatthew Wilcox (Oracle) 	/*
17916b24ca4aSMatthew Wilcox (Oracle) 	 * Ensure we have slots for all the pages in the range.  This is
17926b24ca4aSMatthew Wilcox (Oracle) 	 * almost certainly a no-op because most of the pages must be present
17936b24ca4aSMatthew Wilcox (Oracle) 	 */
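	/*
	 * If xas_create_range() hits ENOMEM, xas_nomem() allocates the
	 * missing xarray nodes outside the lock and returns true, so the
	 * loop retries with memory in hand.
	 */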
179495feeabbSHugh Dickins 	do {
179595feeabbSHugh Dickins 		xas_lock_irq(&xas);
179695feeabbSHugh Dickins 		xas_create_range(&xas);
179795feeabbSHugh Dickins 		if (!xas_error(&xas))
179895feeabbSHugh Dickins 			break;
179995feeabbSHugh Dickins 		xas_unlock_irq(&xas);
180095feeabbSHugh Dickins 		if (!xas_nomem(&xas, GFP_KERNEL)) {
180195feeabbSHugh Dickins 			result = SCAN_FAIL;
180295feeabbSHugh Dickins 			goto out;
180395feeabbSHugh Dickins 		}
180495feeabbSHugh Dickins 	} while (1);
180595feeabbSHugh Dickins 
180650ad2f24SZach O'Keefe 	__SetPageLocked(hpage);
180799cb0dbdSSong Liu 	if (is_shmem)
180850ad2f24SZach O'Keefe 		__SetPageSwapBacked(hpage);
180950ad2f24SZach O'Keefe 	hpage->index = start;
181050ad2f24SZach O'Keefe 	hpage->mapping = mapping;
1811f3f0e1d2SKirill A. Shutemov 
1812f3f0e1d2SKirill A. Shutemov 	/*
181350ad2f24SZach O'Keefe 	 * At this point the hpage is locked and not up-to-date.
181487c460a0SHugh Dickins 	 * It's safe to insert it into the page cache, because nobody would
181587c460a0SHugh Dickins 	 * be able to map it or use it in another way until we unlock it.
1816f3f0e1d2SKirill A. Shutemov 	 */
1817f3f0e1d2SKirill A. Shutemov 
181877da9389SMatthew Wilcox 	xas_set(&xas, start);
181977da9389SMatthew Wilcox 	for (index = start; index < end; index++) {
182077da9389SMatthew Wilcox 		struct page *page = xas_next(&xas);
182164ab3195SVishal Moola (Oracle) 		struct folio *folio;
182277da9389SMatthew Wilcox 
182377da9389SMatthew Wilcox 		VM_BUG_ON(index != xas.xa_index);
182499cb0dbdSSong Liu 		if (is_shmem) {
182577da9389SMatthew Wilcox 			if (!page) {
1826701270faSHugh Dickins 				/*
182799cb0dbdSSong Liu 				 * Stop if extent has been truncated or
182899cb0dbdSSong Liu 				 * hole-punched, and is now completely
182999cb0dbdSSong Liu 				 * empty.
1830701270faSHugh Dickins 				 */
1831701270faSHugh Dickins 				if (index == start) {
1832701270faSHugh Dickins 					if (!xas_next_entry(&xas, end - 1)) {
1833701270faSHugh Dickins 						result = SCAN_TRUNCATED;
1834042a3082SHugh Dickins 						goto xa_locked;
1835701270faSHugh Dickins 					}
1836701270faSHugh Dickins 					xas_set(&xas, index);
1837701270faSHugh Dickins 				}
183877da9389SMatthew Wilcox 				if (!shmem_charge(mapping->host, 1)) {
1839f3f0e1d2SKirill A. Shutemov 					result = SCAN_FAIL;
1840042a3082SHugh Dickins 					goto xa_locked;
1841f3f0e1d2SKirill A. Shutemov 				}
184250ad2f24SZach O'Keefe 				xas_store(&xas, hpage);
184377da9389SMatthew Wilcox 				nr_none++;
184477da9389SMatthew Wilcox 				continue;
1845f3f0e1d2SKirill A. Shutemov 			}
1846f3f0e1d2SKirill A. Shutemov 
18473159f943SMatthew Wilcox 			if (xa_is_value(page) || !PageUptodate(page)) {
184877da9389SMatthew Wilcox 				xas_unlock_irq(&xas);
1849f3f0e1d2SKirill A. Shutemov 				/* swap in or instantiate fallocated page */
18507459c149SMatthew Wilcox (Oracle) 				if (shmem_get_folio(mapping->host, index,
18517459c149SMatthew Wilcox (Oracle) 						&folio, SGP_NOALLOC)) {
1852f3f0e1d2SKirill A. Shutemov 					result = SCAN_FAIL;
185377da9389SMatthew Wilcox 					goto xa_unlocked;
1854f3f0e1d2SKirill A. Shutemov 				}
18557459c149SMatthew Wilcox (Oracle) 				page = folio_file_page(folio, index);
1856f3f0e1d2SKirill A. Shutemov 			} else if (trylock_page(page)) {
1857f3f0e1d2SKirill A. Shutemov 				get_page(page);
1858042a3082SHugh Dickins 				xas_unlock_irq(&xas);
1859f3f0e1d2SKirill A. Shutemov 			} else {
1860f3f0e1d2SKirill A. Shutemov 				result = SCAN_PAGE_LOCK;
1861042a3082SHugh Dickins 				goto xa_locked;
1862f3f0e1d2SKirill A. Shutemov 			}
186399cb0dbdSSong Liu 		} else {	/* !is_shmem */
186499cb0dbdSSong Liu 			if (!page || xa_is_value(page)) {
186599cb0dbdSSong Liu 				xas_unlock_irq(&xas);
186699cb0dbdSSong Liu 				page_cache_sync_readahead(mapping, &file->f_ra,
186799cb0dbdSSong Liu 							  file, index,
1868e5a59d30SDavid Howells 							  end - index);
186999cb0dbdSSong Liu 				/* drain pagevecs to help folio_isolate_lru() */
187099cb0dbdSSong Liu 				lru_add_drain();
187199cb0dbdSSong Liu 				page = find_lock_page(mapping, index);
187299cb0dbdSSong Liu 				if (unlikely(page == NULL)) {
187399cb0dbdSSong Liu 					result = SCAN_FAIL;
187499cb0dbdSSong Liu 					goto xa_unlocked;
187599cb0dbdSSong Liu 				}
187675f36069SSong Liu 			} else if (PageDirty(page)) {
187775f36069SSong Liu 				/*
187875f36069SSong Liu 				 * khugepaged only works on read-only fds,
187975f36069SSong Liu 				 * so this page is dirty because it hasn't
188075f36069SSong Liu 				 * been flushed since first write. There
188175f36069SSong Liu 				 * won't be new dirty pages.
188275f36069SSong Liu 				 *
188375f36069SSong Liu 				 * Trigger async flush here and hope the
188475f36069SSong Liu 				 * writeback is done when khugepaged
188575f36069SSong Liu 				 * revisits this page.
188675f36069SSong Liu 				 *
188775f36069SSong Liu 				 * This is a one-off situation. We are not
188875f36069SSong Liu 				 * forcing writeback in loop.
188975f36069SSong Liu 				 */
189075f36069SSong Liu 				xas_unlock_irq(&xas);
189175f36069SSong Liu 				filemap_flush(mapping);
189275f36069SSong Liu 				result = SCAN_FAIL;
189375f36069SSong Liu 				goto xa_unlocked;
189474c42e1bSRongwei Wang 			} else if (PageWriteback(page)) {
189574c42e1bSRongwei Wang 				xas_unlock_irq(&xas);
189674c42e1bSRongwei Wang 				result = SCAN_FAIL;
189774c42e1bSRongwei Wang 				goto xa_unlocked;
189899cb0dbdSSong Liu 			} else if (trylock_page(page)) {
189999cb0dbdSSong Liu 				get_page(page);
190099cb0dbdSSong Liu 				xas_unlock_irq(&xas);
190199cb0dbdSSong Liu 			} else {
190299cb0dbdSSong Liu 				result = SCAN_PAGE_LOCK;
190399cb0dbdSSong Liu 				goto xa_locked;
190499cb0dbdSSong Liu 			}
190599cb0dbdSSong Liu 		}
1906f3f0e1d2SKirill A. Shutemov 
1907f3f0e1d2SKirill A. Shutemov 		/*
1908b93b0163SMatthew Wilcox 		 * The page must be locked, so we can drop the i_pages lock
1909f3f0e1d2SKirill A. Shutemov 		 * without racing with truncate.
1910f3f0e1d2SKirill A. Shutemov 		 */
1911f3f0e1d2SKirill A. Shutemov 		VM_BUG_ON_PAGE(!PageLocked(page), page);
19124655e5e5SSong Liu 
19134655e5e5SSong Liu 		/* make sure the page is up to date */
19144655e5e5SSong Liu 		if (unlikely(!PageUptodate(page))) {
19154655e5e5SSong Liu 			result = SCAN_FAIL;
19164655e5e5SSong Liu 			goto out_unlock;
19174655e5e5SSong Liu 		}
191806a5e126SHugh Dickins 
191906a5e126SHugh Dickins 		/*
192006a5e126SHugh Dickins 		 * If the file was truncated then extended, or hole-punched, before
192106a5e126SHugh Dickins 		 * we locked the first page, then a THP might be there already.
192258ac9a89SZach O'Keefe 		 * This will be discovered on the first iteration.
192306a5e126SHugh Dickins 		 */
192406a5e126SHugh Dickins 		if (PageTransCompound(page)) {
192558ac9a89SZach O'Keefe 			struct page *head = compound_head(page);
192658ac9a89SZach O'Keefe 
192758ac9a89SZach O'Keefe 			result = compound_order(head) == HPAGE_PMD_ORDER &&
192858ac9a89SZach O'Keefe 					head->index == start
192958ac9a89SZach O'Keefe 					/* Maybe PMD-mapped */
193058ac9a89SZach O'Keefe 					? SCAN_PTE_MAPPED_HUGEPAGE
193158ac9a89SZach O'Keefe 					: SCAN_PAGE_COMPOUND;
193206a5e126SHugh Dickins 			goto out_unlock;
193306a5e126SHugh Dickins 		}
1934f3f0e1d2SKirill A. Shutemov 
193564ab3195SVishal Moola (Oracle) 		folio = page_folio(page);
193664ab3195SVishal Moola (Oracle) 
193764ab3195SVishal Moola (Oracle) 		if (folio_mapping(folio) != mapping) {
1938f3f0e1d2SKirill A. Shutemov 			result = SCAN_TRUNCATED;
1939f3f0e1d2SKirill A. Shutemov 			goto out_unlock;
1940f3f0e1d2SKirill A. Shutemov 		}
1941f3f0e1d2SKirill A. Shutemov 
194264ab3195SVishal Moola (Oracle) 		if (!is_shmem && (folio_test_dirty(folio) ||
194364ab3195SVishal Moola (Oracle) 				  folio_test_writeback(folio))) {
19444655e5e5SSong Liu 			/*
19454655e5e5SSong Liu 			 * khugepaged only works on read-only fds, so this
19464655e5e5SSong Liu 			 * page is dirty because it hasn't been flushed
19474655e5e5SSong Liu 			 * since first write.
19484655e5e5SSong Liu 			 */
19494655e5e5SSong Liu 			result = SCAN_FAIL;
19504655e5e5SSong Liu 			goto out_unlock;
19514655e5e5SSong Liu 		}
19524655e5e5SSong Liu 
195364ab3195SVishal Moola (Oracle) 		if (folio_isolate_lru(folio)) {
1954f3f0e1d2SKirill A. Shutemov 			result = SCAN_DEL_PAGE_LRU;
1955042a3082SHugh Dickins 			goto out_unlock;
1956f3f0e1d2SKirill A. Shutemov 		}
1957f3f0e1d2SKirill A. Shutemov 
195864ab3195SVishal Moola (Oracle) 		if (folio_has_private(folio) &&
195964ab3195SVishal Moola (Oracle) 		    !filemap_release_folio(folio, GFP_KERNEL)) {
196099cb0dbdSSong Liu 			result = SCAN_PAGE_HAS_PRIVATE;
196164ab3195SVishal Moola (Oracle) 			folio_putback_lru(folio);
196299cb0dbdSSong Liu 			goto out_unlock;
196399cb0dbdSSong Liu 		}
196499cb0dbdSSong Liu 
196564ab3195SVishal Moola (Oracle) 		if (folio_mapped(folio))
196664ab3195SVishal Moola (Oracle) 			try_to_unmap(folio,
1967869f7ee6SMatthew Wilcox (Oracle) 					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1968f3f0e1d2SKirill A. Shutemov 
196977da9389SMatthew Wilcox 		xas_lock_irq(&xas);
197077da9389SMatthew Wilcox 		xas_set(&xas, index);
1971f3f0e1d2SKirill A. Shutemov 
197277da9389SMatthew Wilcox 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1973f3f0e1d2SKirill A. Shutemov 
1974f3f0e1d2SKirill A. Shutemov 		/*
1975f3f0e1d2SKirill A. Shutemov 		 * The page is expected to have page_count() == 3:
1976f3f0e1d2SKirill A. Shutemov 		 *  - we hold a pin on it;
197777da9389SMatthew Wilcox 		 *  - one reference from page cache;
1978f3f0e1d2SKirill A. Shutemov 		 *  - one from folio_isolate_lru();
1979f3f0e1d2SKirill A. Shutemov 		 */
1980f3f0e1d2SKirill A. Shutemov 		if (!page_ref_freeze(page, 3)) {
1981f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
1982042a3082SHugh Dickins 			xas_unlock_irq(&xas);
1983042a3082SHugh Dickins 			putback_lru_page(page);
1984042a3082SHugh Dickins 			goto out_unlock;
1985f3f0e1d2SKirill A. Shutemov 		}
1986f3f0e1d2SKirill A. Shutemov 
1987f3f0e1d2SKirill A. Shutemov 		/*
1988f3f0e1d2SKirill A. Shutemov 		 * Add the page to the list to be able to undo the collapse if
1989f3f0e1d2SKirill A. Shutemov 		 * something goes wrong.
1990f3f0e1d2SKirill A. Shutemov 		 */
1991f3f0e1d2SKirill A. Shutemov 		list_add_tail(&page->lru, &pagelist);
1992f3f0e1d2SKirill A. Shutemov 
1993f3f0e1d2SKirill A. Shutemov 		/* Finally, replace with the new page. */
199450ad2f24SZach O'Keefe 		xas_store(&xas, hpage);
1995f3f0e1d2SKirill A. Shutemov 		continue;
1996f3f0e1d2SKirill A. Shutemov out_unlock:
1997f3f0e1d2SKirill A. Shutemov 		unlock_page(page);
1998f3f0e1d2SKirill A. Shutemov 		put_page(page);
1999042a3082SHugh Dickins 		goto xa_unlocked;
2000f3f0e1d2SKirill A. Shutemov 	}
200150ad2f24SZach O'Keefe 	nr = thp_nr_pages(hpage);
2002f3f0e1d2SKirill A. Shutemov 
200399cb0dbdSSong Liu 	if (is_shmem)
200450ad2f24SZach O'Keefe 		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
200509d91cdaSSong Liu 	else {
200650ad2f24SZach O'Keefe 		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
200709d91cdaSSong Liu 		filemap_nr_thps_inc(mapping);
2008eb6ecbedSCollin Fijalkovich 		/*
2009eb6ecbedSCollin Fijalkovich 		 * Paired with smp_mb() in do_dentry_open() to ensure
2010eb6ecbedSCollin Fijalkovich 		 * i_writecount is up to date and the update to nr_thps is
2011eb6ecbedSCollin Fijalkovich 		 * visible. Ensures the page cache will be truncated if the
2012eb6ecbedSCollin Fijalkovich 		 * file is opened writable.
2013eb6ecbedSCollin Fijalkovich 		 */
2014eb6ecbedSCollin Fijalkovich 		smp_mb();
2015eb6ecbedSCollin Fijalkovich 		if (inode_is_open_for_write(mapping->host)) {
2016eb6ecbedSCollin Fijalkovich 			result = SCAN_FAIL;
201750ad2f24SZach O'Keefe 			__mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
2018eb6ecbedSCollin Fijalkovich 			filemap_nr_thps_dec(mapping);
2019eb6ecbedSCollin Fijalkovich 			goto xa_locked;
2020eb6ecbedSCollin Fijalkovich 		}
202109d91cdaSSong Liu 	}
202299cb0dbdSSong Liu 
2023042a3082SHugh Dickins 	if (nr_none) {
202450ad2f24SZach O'Keefe 		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
20252f55f070SMiaohe Lin 		/* nr_none is always 0 for non-shmem. */
202650ad2f24SZach O'Keefe 		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
2027042a3082SHugh Dickins 	}
2028042a3082SHugh Dickins 
20296b24ca4aSMatthew Wilcox (Oracle) 	/* Join all the small entries into a single multi-index entry */
20306b24ca4aSMatthew Wilcox (Oracle) 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
203150ad2f24SZach O'Keefe 	xas_store(&xas, hpage);
2032042a3082SHugh Dickins xa_locked:
2033042a3082SHugh Dickins 	xas_unlock_irq(&xas);
203477da9389SMatthew Wilcox xa_unlocked:
2035042a3082SHugh Dickins 
20366d9df8a5SHugh Dickins 	/*
20376d9df8a5SHugh Dickins 	 * If collapse is successful, flush must be done now before copying.
20386d9df8a5SHugh Dickins 	 * If collapse is unsuccessful, does flush actually need to be done?
20396d9df8a5SHugh Dickins 	 * Do it anyway, to clear the state.
20406d9df8a5SHugh Dickins 	 */
20416d9df8a5SHugh Dickins 	try_to_unmap_flush();
20426d9df8a5SHugh Dickins 
2043f3f0e1d2SKirill A. Shutemov 	if (result == SCAN_SUCCEED) {
204477da9389SMatthew Wilcox 		struct page *page, *tmp;
2045284a344eSVishal Moola (Oracle) 		struct folio *folio;
2046f3f0e1d2SKirill A. Shutemov 
2047f3f0e1d2SKirill A. Shutemov 		/*
204877da9389SMatthew Wilcox 		 * Replacing the old pages with the new one has succeeded; now
204977da9389SMatthew Wilcox 		 * we need to copy the content and free the old pages.
2050f3f0e1d2SKirill A. Shutemov 		 */
20512af8ff29SHugh Dickins 		index = start;
2052f3f0e1d2SKirill A. Shutemov 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
20532af8ff29SHugh Dickins 			while (index < page->index) {
205450ad2f24SZach O'Keefe 				clear_highpage(hpage + (index % HPAGE_PMD_NR));
20552af8ff29SHugh Dickins 				index++;
20562af8ff29SHugh Dickins 			}
205750ad2f24SZach O'Keefe 			copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
2058f3f0e1d2SKirill A. Shutemov 				      page);
2059f3f0e1d2SKirill A. Shutemov 			list_del(&page->lru);
2060f3f0e1d2SKirill A. Shutemov 			page->mapping = NULL;
2061042a3082SHugh Dickins 			page_ref_unfreeze(page, 1);
2062f3f0e1d2SKirill A. Shutemov 			ClearPageActive(page);
2063f3f0e1d2SKirill A. Shutemov 			ClearPageUnevictable(page);
2064042a3082SHugh Dickins 			unlock_page(page);
2065f3f0e1d2SKirill A. Shutemov 			put_page(page);
20662af8ff29SHugh Dickins 			index++;
20672af8ff29SHugh Dickins 		}
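		/*
		 * Any remaining holes after the last old page also become
		 * zero-filled subpages of the huge page.
		 */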
20682af8ff29SHugh Dickins 		while (index < end) {
206950ad2f24SZach O'Keefe 			clear_highpage(hpage + (index % HPAGE_PMD_NR));
20702af8ff29SHugh Dickins 			index++;
2071f3f0e1d2SKirill A. Shutemov 		}
2072f3f0e1d2SKirill A. Shutemov 
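		/*
		 * Everything is copied in: publish the folio.  Together with
		 * the reference taken at allocation, the folio now holds one
		 * reference per page cache index it covers.
		 */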
2073284a344eSVishal Moola (Oracle) 		folio = page_folio(hpage);
2074284a344eSVishal Moola (Oracle) 		folio_mark_uptodate(folio);
2075284a344eSVishal Moola (Oracle) 		folio_ref_add(folio, HPAGE_PMD_NR - 1);
2076284a344eSVishal Moola (Oracle) 
20776058eaecSJohannes Weiner 		if (is_shmem)
2078284a344eSVishal Moola (Oracle) 			folio_mark_dirty(folio);
2079284a344eSVishal Moola (Oracle) 		folio_add_lru(folio);
2080f3f0e1d2SKirill A. Shutemov 
2081042a3082SHugh Dickins 		/*
2082042a3082SHugh Dickins 		 * Remove pte page tables, so we can re-fault the page as huge.
2083042a3082SHugh Dickins 		 */
208434488399SZach O'Keefe 		result = retract_page_tables(mapping, start, mm, addr, hpage,
208534488399SZach O'Keefe 					     cc);
208650ad2f24SZach O'Keefe 		unlock_page(hpage);
208750ad2f24SZach O'Keefe 		hpage = NULL;
2088f3f0e1d2SKirill A. Shutemov 	} else {
208977da9389SMatthew Wilcox 		struct page *page;
2090aaa52e34SHugh Dickins 
209177da9389SMatthew Wilcox 		/* Something went wrong: roll back page cache changes */
209277da9389SMatthew Wilcox 		xas_lock_irq(&xas);
20932f55f070SMiaohe Lin 		if (nr_none) {
2094aaa52e34SHugh Dickins 			mapping->nrpages -= nr_none;
2095aaa52e34SHugh Dickins 			shmem_uncharge(mapping->host, nr_none);
20962f55f070SMiaohe Lin 		}
2097aaa52e34SHugh Dickins 
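		/*
		 * Walk the range again, putting the old pages back into their
		 * slots and turning the entries we inserted ourselves back
		 * into holes.
		 */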
209877da9389SMatthew Wilcox 		xas_set(&xas, start);
209977da9389SMatthew Wilcox 		xas_for_each(&xas, page, end - 1) {
2100f3f0e1d2SKirill A. Shutemov 			page = list_first_entry_or_null(&pagelist,
2101f3f0e1d2SKirill A. Shutemov 					struct page, lru);
210277da9389SMatthew Wilcox 			if (!page || xas.xa_index < page->index) {
2103f3f0e1d2SKirill A. Shutemov 				if (!nr_none)
2104f3f0e1d2SKirill A. Shutemov 					break;
2105f3f0e1d2SKirill A. Shutemov 				nr_none--;
210659749e6cSJohannes Weiner 				/* Put holes back where they were */
210777da9389SMatthew Wilcox 				xas_store(&xas, NULL);
2108f3f0e1d2SKirill A. Shutemov 				continue;
2109f3f0e1d2SKirill A. Shutemov 			}
2110f3f0e1d2SKirill A. Shutemov 
211177da9389SMatthew Wilcox 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2112f3f0e1d2SKirill A. Shutemov 
2113f3f0e1d2SKirill A. Shutemov 			/* Unfreeze the page. */
2114f3f0e1d2SKirill A. Shutemov 			list_del(&page->lru);
2115f3f0e1d2SKirill A. Shutemov 			page_ref_unfreeze(page, 2);
211677da9389SMatthew Wilcox 			xas_store(&xas, page);
211777da9389SMatthew Wilcox 			xas_pause(&xas);
211877da9389SMatthew Wilcox 			xas_unlock_irq(&xas);
2119f3f0e1d2SKirill A. Shutemov 			unlock_page(page);
2120042a3082SHugh Dickins 			putback_lru_page(page);
212177da9389SMatthew Wilcox 			xas_lock_irq(&xas);
2122f3f0e1d2SKirill A. Shutemov 		}
2123f3f0e1d2SKirill A. Shutemov 		VM_BUG_ON(nr_none);
212477da9389SMatthew Wilcox 		xas_unlock_irq(&xas);
2125f3f0e1d2SKirill A. Shutemov 
212650ad2f24SZach O'Keefe 		hpage->mapping = NULL;
2127f3f0e1d2SKirill A. Shutemov 	}
2128042a3082SHugh Dickins 
212950ad2f24SZach O'Keefe 	if (hpage)
213050ad2f24SZach O'Keefe 		unlock_page(hpage);
2131f3f0e1d2SKirill A. Shutemov out:
2132f3f0e1d2SKirill A. Shutemov 	VM_BUG_ON(!list_empty(&pagelist));
213350ad2f24SZach O'Keefe 	if (hpage) {
213450ad2f24SZach O'Keefe 		mem_cgroup_uncharge(page_folio(hpage));
213550ad2f24SZach O'Keefe 		put_page(hpage);
2136c6a7f445SYang Shi 	}
21374c9473e8SGautam Menghani 
21384c9473e8SGautam Menghani 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
213950ad2f24SZach O'Keefe 	return result;
2140f3f0e1d2SKirill A. Shutemov }
2141f3f0e1d2SKirill A. Shutemov 
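/*
 * Scan the HPAGE_PMD_NR page cache slots starting at @start to decide whether
 * the range is a collapse candidate, handing off to collapse_file() on
 * success.  The scan itself runs under RCU only and takes no page locks.
 */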
214234488399SZach O'Keefe static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
214334488399SZach O'Keefe 				    struct file *file, pgoff_t start,
214434488399SZach O'Keefe 				    struct collapse_control *cc)
2145f3f0e1d2SKirill A. Shutemov {
2146f3f0e1d2SKirill A. Shutemov 	struct page *page = NULL;
2147579c571eSSong Liu 	struct address_space *mapping = file->f_mapping;
214885b392dbSMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, start);
2149f3f0e1d2SKirill A. Shutemov 	int present, swap;
2150f3f0e1d2SKirill A. Shutemov 	int node = NUMA_NO_NODE;
2151f3f0e1d2SKirill A. Shutemov 	int result = SCAN_SUCCEED;
2152f3f0e1d2SKirill A. Shutemov 
2153f3f0e1d2SKirill A. Shutemov 	present = 0;
2154f3f0e1d2SKirill A. Shutemov 	swap = 0;
215534d6b470SZach O'Keefe 	memset(cc->node_load, 0, sizeof(cc->node_load));
2156e031ff96SYang Shi 	nodes_clear(cc->alloc_nmask);
2157f3f0e1d2SKirill A. Shutemov 	rcu_read_lock();
215885b392dbSMatthew Wilcox 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
215985b392dbSMatthew Wilcox 		if (xas_retry(&xas, page))
2160f3f0e1d2SKirill A. Shutemov 			continue;
2161f3f0e1d2SKirill A. Shutemov 
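		/*
		 * Value entries (e.g. swapped-out shmem pages) count toward
		 * the swap limit.
		 */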
216285b392dbSMatthew Wilcox 		if (xa_is_value(page)) {
2163d8ea7cc8SZach O'Keefe 			++swap;
2164d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
2165d8ea7cc8SZach O'Keefe 			    swap > khugepaged_max_ptes_swap) {
2166f3f0e1d2SKirill A. Shutemov 				result = SCAN_EXCEED_SWAP_PTE;
2167e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2168f3f0e1d2SKirill A. Shutemov 				break;
2169f3f0e1d2SKirill A. Shutemov 			}
2170f3f0e1d2SKirill A. Shutemov 			continue;
2171f3f0e1d2SKirill A. Shutemov 		}
2172f3f0e1d2SKirill A. Shutemov 
21736b24ca4aSMatthew Wilcox (Oracle) 		/*
217458ac9a89SZach O'Keefe 		 * TODO: khugepaged should compact smaller compound pages
21756b24ca4aSMatthew Wilcox (Oracle) 		 * into a PMD-sized page.
21766b24ca4aSMatthew Wilcox (Oracle) 		 */
2177f3f0e1d2SKirill A. Shutemov 		if (PageTransCompound(page)) {
217858ac9a89SZach O'Keefe 			struct page *head = compound_head(page);
217958ac9a89SZach O'Keefe 
218058ac9a89SZach O'Keefe 			result = compound_order(head) == HPAGE_PMD_ORDER &&
218158ac9a89SZach O'Keefe 					head->index == start
218258ac9a89SZach O'Keefe 					/* Maybe PMD-mapped */
218358ac9a89SZach O'Keefe 					? SCAN_PTE_MAPPED_HUGEPAGE
218458ac9a89SZach O'Keefe 					: SCAN_PAGE_COMPOUND;
218558ac9a89SZach O'Keefe 			/*
218658ac9a89SZach O'Keefe 			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
218758ac9a89SZach O'Keefe 			 * by the caller won't touch the page cache, and so
218858ac9a89SZach O'Keefe 			 * it's safe to skip LRU and refcount checks before
218958ac9a89SZach O'Keefe 			 * returning.
219058ac9a89SZach O'Keefe 			 */
2191f3f0e1d2SKirill A. Shutemov 			break;
2192f3f0e1d2SKirill A. Shutemov 		}
2193f3f0e1d2SKirill A. Shutemov 
2194f3f0e1d2SKirill A. Shutemov 		node = page_to_nid(page);
21957d2c4385SZach O'Keefe 		if (hpage_collapse_scan_abort(node, cc)) {
2196f3f0e1d2SKirill A. Shutemov 			result = SCAN_SCAN_ABORT;
2197f3f0e1d2SKirill A. Shutemov 			break;
2198f3f0e1d2SKirill A. Shutemov 		}
219934d6b470SZach O'Keefe 		cc->node_load[node]++;
2200f3f0e1d2SKirill A. Shutemov 
2201f3f0e1d2SKirill A. Shutemov 		if (!PageLRU(page)) {
2202f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_LRU;
2203f3f0e1d2SKirill A. Shutemov 			break;
2204f3f0e1d2SKirill A. Shutemov 		}
2205f3f0e1d2SKirill A. Shutemov 
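		/*
		 * Expect only the page cache reference, the mappings and any
		 * fs-private reference; anything more means the page is busy.
		 */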
220699cb0dbdSSong Liu 		if (page_count(page) !=
220799cb0dbdSSong Liu 		    1 + page_mapcount(page) + page_has_private(page)) {
2208f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
2209f3f0e1d2SKirill A. Shutemov 			break;
2210f3f0e1d2SKirill A. Shutemov 		}
2211f3f0e1d2SKirill A. Shutemov 
2212f3f0e1d2SKirill A. Shutemov 		/*
2213f3f0e1d2SKirill A. Shutemov 		 * We probably should check if the page is referenced here, but
2214f3f0e1d2SKirill A. Shutemov 		 * nobody would transfer pte_young() to PageReferenced() for us.
2215f3f0e1d2SKirill A. Shutemov 		 * And an rmap walk here is just too costly...
2216f3f0e1d2SKirill A. Shutemov 		 */
2217f3f0e1d2SKirill A. Shutemov 
2218f3f0e1d2SKirill A. Shutemov 		present++;
2219f3f0e1d2SKirill A. Shutemov 
2220f3f0e1d2SKirill A. Shutemov 		if (need_resched()) {
222185b392dbSMatthew Wilcox 			xas_pause(&xas);
2222f3f0e1d2SKirill A. Shutemov 			cond_resched_rcu();
2223f3f0e1d2SKirill A. Shutemov 		}
2224f3f0e1d2SKirill A. Shutemov 	}
2225f3f0e1d2SKirill A. Shutemov 	rcu_read_unlock();
2226f3f0e1d2SKirill A. Shutemov 
2227f3f0e1d2SKirill A. Shutemov 	if (result == SCAN_SUCCEED) {
2228d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
2229d8ea7cc8SZach O'Keefe 		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2230f3f0e1d2SKirill A. Shutemov 			result = SCAN_EXCEED_NONE_PTE;
2231e9ea874aSYang Yang 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2232f3f0e1d2SKirill A. Shutemov 		} else {
223334488399SZach O'Keefe 			result = collapse_file(mm, addr, file, start, cc);
2234f3f0e1d2SKirill A. Shutemov 		}
2235f3f0e1d2SKirill A. Shutemov 	}
2236f3f0e1d2SKirill A. Shutemov 
2237045634ffSGautam Menghani 	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
223850ad2f24SZach O'Keefe 	return result;
2239f3f0e1d2SKirill A. Shutemov }
2240f3f0e1d2SKirill A. Shutemov #else
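/* CONFIG_SHMEM=n: the file collapse paths are compiled out; stub them. */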
224134488399SZach O'Keefe static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
224234488399SZach O'Keefe 				    struct file *file, pgoff_t start,
224334488399SZach O'Keefe 				    struct collapse_control *cc)
2244f3f0e1d2SKirill A. Shutemov {
2245f3f0e1d2SKirill A. Shutemov 	BUILD_BUG();
2246f3f0e1d2SKirill A. Shutemov }
224727e1f827SSong Liu 
2248b26e2701SQi Zheng static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
224927e1f827SSong Liu {
225027e1f827SSong Liu }
225158ac9a89SZach O'Keefe 
225258ac9a89SZach O'Keefe static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
225358ac9a89SZach O'Keefe 					  unsigned long addr)
225458ac9a89SZach O'Keefe {
225558ac9a89SZach O'Keefe 	return false;
225658ac9a89SZach O'Keefe }
2257f3f0e1d2SKirill A. Shutemov #endif
2258f3f0e1d2SKirill A. Shutemov 
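/*
 * Scan up to @pages worth of ptes, resuming from the position saved in
 * khugepaged_scan.  Returns the amount of progress made; *result reports
 * the outcome of the last scan or collapse attempt.
 */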
225950ad2f24SZach O'Keefe static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
226034d6b470SZach O'Keefe 					    struct collapse_control *cc)
2261b46e756fSKirill A. Shutemov 	__releases(&khugepaged_mm_lock)
2262b46e756fSKirill A. Shutemov 	__acquires(&khugepaged_mm_lock)
2263b46e756fSKirill A. Shutemov {
226468540502SMatthew Wilcox (Oracle) 	struct vma_iterator vmi;
2265b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
2266b26e2701SQi Zheng 	struct mm_slot *slot;
2267b46e756fSKirill A. Shutemov 	struct mm_struct *mm;
2268b46e756fSKirill A. Shutemov 	struct vm_area_struct *vma;
2269b46e756fSKirill A. Shutemov 	int progress = 0;
2270b46e756fSKirill A. Shutemov 
2271b46e756fSKirill A. Shutemov 	VM_BUG_ON(!pages);
227235f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
227350ad2f24SZach O'Keefe 	*result = SCAN_FAIL;
2274b46e756fSKirill A. Shutemov 
2275b26e2701SQi Zheng 	if (khugepaged_scan.mm_slot) {
2276b46e756fSKirill A. Shutemov 		mm_slot = khugepaged_scan.mm_slot;
2277b26e2701SQi Zheng 		slot = &mm_slot->slot;
2278b26e2701SQi Zheng 	} else {
2279b26e2701SQi Zheng 		slot = list_entry(khugepaged_scan.mm_head.next,
2280b46e756fSKirill A. Shutemov 				     struct mm_slot, mm_node);
2281b26e2701SQi Zheng 		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2282b46e756fSKirill A. Shutemov 		khugepaged_scan.address = 0;
2283b46e756fSKirill A. Shutemov 		khugepaged_scan.mm_slot = mm_slot;
2284b46e756fSKirill A. Shutemov 	}
2285b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
228627e1f827SSong Liu 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2287b46e756fSKirill A. Shutemov 
2288b26e2701SQi Zheng 	mm = slot->mm;
22893b454ad3SYang Shi 	/*
22903b454ad3SYang Shi 	 * Don't wait for the mmap_lock (to avoid long wait times).  Just move to
22913b454ad3SYang Shi 	 * the next mm on the list.
22923b454ad3SYang Shi 	 */
2293b46e756fSKirill A. Shutemov 	vma = NULL;
2294d8ed45c5SMichel Lespinasse 	if (unlikely(!mmap_read_trylock(mm)))
2295c1e8d7c6SMichel Lespinasse 		goto breakouterloop_mmap_lock;
2296b46e756fSKirill A. Shutemov 
2297b46e756fSKirill A. Shutemov 	progress++;
229868540502SMatthew Wilcox (Oracle) 	if (unlikely(hpage_collapse_test_exit(mm)))
229968540502SMatthew Wilcox (Oracle) 		goto breakouterloop;
230068540502SMatthew Wilcox (Oracle) 
230168540502SMatthew Wilcox (Oracle) 	vma_iter_init(&vmi, mm, khugepaged_scan.address);
230268540502SMatthew Wilcox (Oracle) 	for_each_vma(vmi, vma) {
2303b46e756fSKirill A. Shutemov 		unsigned long hstart, hend;
2304b46e756fSKirill A. Shutemov 
2305b46e756fSKirill A. Shutemov 		cond_resched();
23067d2c4385SZach O'Keefe 		if (unlikely(hpage_collapse_test_exit(mm))) {
2307b46e756fSKirill A. Shutemov 			progress++;
2308b46e756fSKirill A. Shutemov 			break;
2309b46e756fSKirill A. Shutemov 		}
2310a7f4e6e4SZach O'Keefe 		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2311b46e756fSKirill A. Shutemov skip:
2312b46e756fSKirill A. Shutemov 			progress++;
2313b46e756fSKirill A. Shutemov 			continue;
2314b46e756fSKirill A. Shutemov 		}
23154fa6893fSYang Shi 		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
23164fa6893fSYang Shi 		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2317b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address > hend)
2318b46e756fSKirill A. Shutemov 			goto skip;
2319b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address < hstart)
2320b46e756fSKirill A. Shutemov 			khugepaged_scan.address = hstart;
2321b46e756fSKirill A. Shutemov 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2322b46e756fSKirill A. Shutemov 
2323b46e756fSKirill A. Shutemov 		while (khugepaged_scan.address < hend) {
232450ad2f24SZach O'Keefe 			bool mmap_locked = true;
232550ad2f24SZach O'Keefe 
2326b46e756fSKirill A. Shutemov 			cond_resched();
23277d2c4385SZach O'Keefe 			if (unlikely(hpage_collapse_test_exit(mm)))
2328b46e756fSKirill A. Shutemov 				goto breakouterloop;
2329b46e756fSKirill A. Shutemov 
2330b46e756fSKirill A. Shutemov 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2331b46e756fSKirill A. Shutemov 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2332b46e756fSKirill A. Shutemov 				  hend);
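			/*
			 * For file mappings, pin the file and drop mmap_lock
			 * before scanning: the page cache scan does not need
			 * the lock.
			 */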
233399cb0dbdSSong Liu 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2334396bcc52SMatthew Wilcox (Oracle) 				struct file *file = get_file(vma->vm_file);
2335f3f0e1d2SKirill A. Shutemov 				pgoff_t pgoff = linear_page_index(vma,
2336f3f0e1d2SKirill A. Shutemov 						khugepaged_scan.address);
233799cb0dbdSSong Liu 
2338d8ed45c5SMichel Lespinasse 				mmap_read_unlock(mm);
233934488399SZach O'Keefe 				*result = hpage_collapse_scan_file(mm,
234034488399SZach O'Keefe 								   khugepaged_scan.address,
234134488399SZach O'Keefe 								   file, pgoff, cc);
234250ad2f24SZach O'Keefe 				mmap_locked = false;
2343f3f0e1d2SKirill A. Shutemov 				fput(file);
2344f3f0e1d2SKirill A. Shutemov 			} else {
23457d2c4385SZach O'Keefe 				*result = hpage_collapse_scan_pmd(mm, vma,
2346b46e756fSKirill A. Shutemov 								  khugepaged_scan.address,
23477d2c4385SZach O'Keefe 								  &mmap_locked,
23487d2c4385SZach O'Keefe 								  cc);
2349f3f0e1d2SKirill A. Shutemov 			}
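			/*
			 * SCAN_PTE_MAPPED_HUGEPAGE means the range is already
			 * backed by a hugepage in the page cache that is only
			 * PTE-mapped here: queue it so the page table can be
			 * retracted by khugepaged_collapse_pte_mapped_thps().
			 */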
235058ac9a89SZach O'Keefe 			switch (*result) {
235158ac9a89SZach O'Keefe 			case SCAN_PTE_MAPPED_HUGEPAGE: {
235258ac9a89SZach O'Keefe 				pmd_t *pmd;
235358ac9a89SZach O'Keefe 
235458ac9a89SZach O'Keefe 				*result = find_pmd_or_thp_or_none(mm,
235558ac9a89SZach O'Keefe 								  khugepaged_scan.address,
235658ac9a89SZach O'Keefe 								  &pmd);
235758ac9a89SZach O'Keefe 				if (*result != SCAN_SUCCEED)
235858ac9a89SZach O'Keefe 					break;
235958ac9a89SZach O'Keefe 				if (!khugepaged_add_pte_mapped_thp(mm,
236058ac9a89SZach O'Keefe 								   khugepaged_scan.address))
236158ac9a89SZach O'Keefe 					break;
236258ac9a89SZach O'Keefe 			} fallthrough;
236358ac9a89SZach O'Keefe 			case SCAN_SUCCEED:
236450ad2f24SZach O'Keefe 				++khugepaged_pages_collapsed;
236558ac9a89SZach O'Keefe 				break;
236658ac9a89SZach O'Keefe 			default:
236758ac9a89SZach O'Keefe 				break;
236858ac9a89SZach O'Keefe 			}
236958ac9a89SZach O'Keefe 
2370b46e756fSKirill A. Shutemov 			/* move to next address */
2371b46e756fSKirill A. Shutemov 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2372b46e756fSKirill A. Shutemov 			progress += HPAGE_PMD_NR;
237350ad2f24SZach O'Keefe 			if (!mmap_locked)
237450ad2f24SZach O'Keefe 				/*
237550ad2f24SZach O'Keefe 				 * We released mmap_lock, so break the loop.  Note
237650ad2f24SZach O'Keefe 				 * that we drop mmap_lock before all hugepage
237750ad2f24SZach O'Keefe 				 * allocations, so if allocation fails, we are
237850ad2f24SZach O'Keefe 				 * guaranteed to break here and report the
237950ad2f24SZach O'Keefe 				 * correct result back to the caller.
238050ad2f24SZach O'Keefe 				 */
2381c1e8d7c6SMichel Lespinasse 				goto breakouterloop_mmap_lock;
2382b46e756fSKirill A. Shutemov 			if (progress >= pages)
2383b46e756fSKirill A. Shutemov 				goto breakouterloop;
2384b46e756fSKirill A. Shutemov 		}
2385b46e756fSKirill A. Shutemov 	}
2386b46e756fSKirill A. Shutemov breakouterloop:
2387d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2388c1e8d7c6SMichel Lespinasse breakouterloop_mmap_lock:
2389b46e756fSKirill A. Shutemov 
2390b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2391b46e756fSKirill A. Shutemov 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2392b46e756fSKirill A. Shutemov 	/*
2393b46e756fSKirill A. Shutemov 	 * Release the current mm_slot if this mm is about to die, or
2394b46e756fSKirill A. Shutemov 	 * if we scanned all vmas of this mm.
2395b46e756fSKirill A. Shutemov 	 */
23967d2c4385SZach O'Keefe 	if (hpage_collapse_test_exit(mm) || !vma) {
2397b46e756fSKirill A. Shutemov 		/*
2398b46e756fSKirill A. Shutemov 		 * Make sure that if mm_users is reaching zero while
2399b46e756fSKirill A. Shutemov 		 * khugepaged runs here, khugepaged_exit will find
2400b46e756fSKirill A. Shutemov 		 * mm_slot not pointing to the exiting mm.
2401b46e756fSKirill A. Shutemov 		 */
2402b26e2701SQi Zheng 		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2403b26e2701SQi Zheng 			slot = list_entry(slot->mm_node.next,
2404b46e756fSKirill A. Shutemov 					  struct mm_slot, mm_node);
2405b26e2701SQi Zheng 			khugepaged_scan.mm_slot =
2406b26e2701SQi Zheng 				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2407b46e756fSKirill A. Shutemov 			khugepaged_scan.address = 0;
2408b46e756fSKirill A. Shutemov 		} else {
2409b46e756fSKirill A. Shutemov 			khugepaged_scan.mm_slot = NULL;
2410b46e756fSKirill A. Shutemov 			khugepaged_full_scans++;
2411b46e756fSKirill A. Shutemov 		}
2412b46e756fSKirill A. Shutemov 
2413b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2414b46e756fSKirill A. Shutemov 	}
2415b46e756fSKirill A. Shutemov 
2416b46e756fSKirill A. Shutemov 	return progress;
2417b46e756fSKirill A. Shutemov }
2418b46e756fSKirill A. Shutemov 
2419b46e756fSKirill A. Shutemov static int khugepaged_has_work(void)
2420b46e756fSKirill A. Shutemov {
2421b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) &&
24221064026bSYang Shi 		hugepage_flags_enabled();
2423b46e756fSKirill A. Shutemov }
2424b46e756fSKirill A. Shutemov 
2425b46e756fSKirill A. Shutemov static int khugepaged_wait_event(void)
2426b46e756fSKirill A. Shutemov {
2427b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) ||
2428b46e756fSKirill A. Shutemov 		kthread_should_stop();
2429b46e756fSKirill A. Shutemov }
2430b46e756fSKirill A. Shutemov 
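/*
 * One scanning pass: cover up to khugepaged_pages_to_scan pages, bailing out
 * early if hugepage allocation keeps failing.
 */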
243134d6b470SZach O'Keefe static void khugepaged_do_scan(struct collapse_control *cc)
2432b46e756fSKirill A. Shutemov {
2433b46e756fSKirill A. Shutemov 	unsigned int progress = 0, pass_through_head = 0;
243489dc6a96SYanfei Xu 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2435b46e756fSKirill A. Shutemov 	bool wait = true;
243650ad2f24SZach O'Keefe 	int result = SCAN_SUCCEED;
2437b46e756fSKirill A. Shutemov 
2438a980df33SKirill A. Shutemov 	lru_add_drain_all();
2439a980df33SKirill A. Shutemov 
2440c6a7f445SYang Shi 	while (true) {
2441b46e756fSKirill A. Shutemov 		cond_resched();
2442b46e756fSKirill A. Shutemov 
2443b46e756fSKirill A. Shutemov 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2444b46e756fSKirill A. Shutemov 			break;
2445b46e756fSKirill A. Shutemov 
2446b46e756fSKirill A. Shutemov 		spin_lock(&khugepaged_mm_lock);
2447b46e756fSKirill A. Shutemov 		if (!khugepaged_scan.mm_slot)
2448b46e756fSKirill A. Shutemov 			pass_through_head++;
2449b46e756fSKirill A. Shutemov 		if (khugepaged_has_work() &&
2450b46e756fSKirill A. Shutemov 		    pass_through_head < 2)
2451b46e756fSKirill A. Shutemov 			progress += khugepaged_scan_mm_slot(pages - progress,
245250ad2f24SZach O'Keefe 							    &result, cc);
2453b46e756fSKirill A. Shutemov 		else
2454b46e756fSKirill A. Shutemov 			progress = pages;
2455b46e756fSKirill A. Shutemov 		spin_unlock(&khugepaged_mm_lock);
2456b46e756fSKirill A. Shutemov 
2457c6a7f445SYang Shi 		if (progress >= pages)
2458c6a7f445SYang Shi 			break;
2459c6a7f445SYang Shi 
246050ad2f24SZach O'Keefe 		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2461c6a7f445SYang Shi 			/*
2462c6a7f445SYang Shi 			 * If allocation fails the first time, try to sleep for
2463c6a7f445SYang Shi 			 * a while.  If it fails again, cancel the scan.
2464c6a7f445SYang Shi 			 */
2465c6a7f445SYang Shi 			if (!wait)
2466c6a7f445SYang Shi 				break;
2467c6a7f445SYang Shi 			wait = false;
2468c6a7f445SYang Shi 			khugepaged_alloc_sleep();
2469c6a7f445SYang Shi 		}
2470c6a7f445SYang Shi 	}
2471b46e756fSKirill A. Shutemov }
2472b46e756fSKirill A. Shutemov 
2473b46e756fSKirill A. Shutemov static bool khugepaged_should_wakeup(void)
2474b46e756fSKirill A. Shutemov {
2475b46e756fSKirill A. Shutemov 	return kthread_should_stop() ||
2476b46e756fSKirill A. Shutemov 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2477b46e756fSKirill A. Shutemov }
2478b46e756fSKirill A. Shutemov 
2479b46e756fSKirill A. Shutemov static void khugepaged_wait_work(void)
2480b46e756fSKirill A. Shutemov {
2481b46e756fSKirill A. Shutemov 	if (khugepaged_has_work()) {
2482b46e756fSKirill A. Shutemov 		const unsigned long scan_sleep_jiffies =
2483b46e756fSKirill A. Shutemov 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2484b46e756fSKirill A. Shutemov 
2485b46e756fSKirill A. Shutemov 		if (!scan_sleep_jiffies)
2486b46e756fSKirill A. Shutemov 			return;
2487b46e756fSKirill A. Shutemov 
2488b46e756fSKirill A. Shutemov 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2489b46e756fSKirill A. Shutemov 		wait_event_freezable_timeout(khugepaged_wait,
2490b46e756fSKirill A. Shutemov 					     khugepaged_should_wakeup(),
2491b46e756fSKirill A. Shutemov 					     scan_sleep_jiffies);
2492b46e756fSKirill A. Shutemov 		return;
2493b46e756fSKirill A. Shutemov 	}
2494b46e756fSKirill A. Shutemov 
24951064026bSYang Shi 	if (hugepage_flags_enabled())
2496b46e756fSKirill A. Shutemov 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2497b46e756fSKirill A. Shutemov }
2498b46e756fSKirill A. Shutemov 
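/* Main loop of the khugepaged kernel thread: scan, then sleep. */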
2499b46e756fSKirill A. Shutemov static int khugepaged(void *none)
2500b46e756fSKirill A. Shutemov {
2501b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
2502b46e756fSKirill A. Shutemov 
2503b46e756fSKirill A. Shutemov 	set_freezable();
2504b46e756fSKirill A. Shutemov 	set_user_nice(current, MAX_NICE);
2505b46e756fSKirill A. Shutemov 
2506b46e756fSKirill A. Shutemov 	while (!kthread_should_stop()) {
250734d6b470SZach O'Keefe 		khugepaged_do_scan(&khugepaged_collapse_control);
2508b46e756fSKirill A. Shutemov 		khugepaged_wait_work();
2509b46e756fSKirill A. Shutemov 	}
2510b46e756fSKirill A. Shutemov 
2511b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2512b46e756fSKirill A. Shutemov 	mm_slot = khugepaged_scan.mm_slot;
2513b46e756fSKirill A. Shutemov 	khugepaged_scan.mm_slot = NULL;
2514b46e756fSKirill A. Shutemov 	if (mm_slot)
2515b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2516b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
2517b46e756fSKirill A. Shutemov 	return 0;
2518b46e756fSKirill A. Shutemov }
2519b46e756fSKirill A. Shutemov 
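/*
 * Raise min_free_kbytes so that enough pageblocks stay free to keep hugepage
 * allocation and anti-fragmentation working; fall back to the default
 * calculation when hugepages are disabled.
 */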
2520b46e756fSKirill A. Shutemov static void set_recommended_min_free_kbytes(void)
2521b46e756fSKirill A. Shutemov {
2522b46e756fSKirill A. Shutemov 	struct zone *zone;
2523b46e756fSKirill A. Shutemov 	int nr_zones = 0;
2524b46e756fSKirill A. Shutemov 	unsigned long recommended_min;
2525b46e756fSKirill A. Shutemov 
25261064026bSYang Shi 	if (!hugepage_flags_enabled()) {
2527bd3400eaSLiangcai Fan 		calculate_min_free_kbytes();
2528bd3400eaSLiangcai Fan 		goto update_wmarks;
2529bd3400eaSLiangcai Fan 	}
2530bd3400eaSLiangcai Fan 
2531b7d349c7SJoonsoo Kim 	for_each_populated_zone(zone) {
2532b7d349c7SJoonsoo Kim 		/*
2533b7d349c7SJoonsoo Kim 		 * We don't need to worry about fragmentation of
2534b7d349c7SJoonsoo Kim 		 * ZONE_MOVABLE since it only has movable pages.
2535b7d349c7SJoonsoo Kim 		 */
2536b7d349c7SJoonsoo Kim 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2537b7d349c7SJoonsoo Kim 			continue;
2538b7d349c7SJoonsoo Kim 
2539b46e756fSKirill A. Shutemov 		nr_zones++;
2540b7d349c7SJoonsoo Kim 	}
2541b46e756fSKirill A. Shutemov 
2542b46e756fSKirill A. Shutemov 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2543b46e756fSKirill A. Shutemov 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2544b46e756fSKirill A. Shutemov 
2545b46e756fSKirill A. Shutemov 	/*
2546b46e756fSKirill A. Shutemov 	 * Make sure that on average at least two pageblocks are almost free
2547b46e756fSKirill A. Shutemov 	 * of another type, one for a migratetype to fall back to and a
2548b46e756fSKirill A. Shutemov 	 * second to avoid subsequent fallbacks of other types.  There are 3
2549b46e756fSKirill A. Shutemov 	 * MIGRATE_TYPES we care about.
2550b46e756fSKirill A. Shutemov 	 */
2551b46e756fSKirill A. Shutemov 	recommended_min += pageblock_nr_pages * nr_zones *
2552b46e756fSKirill A. Shutemov 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2553b46e756fSKirill A. Shutemov 
2554b46e756fSKirill A. Shutemov 	/* never allow reserving more than 5% of the lowmem */
2555b46e756fSKirill A. Shutemov 	recommended_min = min(recommended_min,
2556b46e756fSKirill A. Shutemov 			      (unsigned long) nr_free_buffer_pages() / 20);
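	/* Convert from pages to kilobytes. */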
2557b46e756fSKirill A. Shutemov 	recommended_min <<= (PAGE_SHIFT-10);
2558b46e756fSKirill A. Shutemov 
2559b46e756fSKirill A. Shutemov 	if (recommended_min > min_free_kbytes) {
2560b46e756fSKirill A. Shutemov 		if (user_min_free_kbytes >= 0)
2561b46e756fSKirill A. Shutemov 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2562b46e756fSKirill A. Shutemov 				min_free_kbytes, recommended_min);
2563b46e756fSKirill A. Shutemov 
2564b46e756fSKirill A. Shutemov 		min_free_kbytes = recommended_min;
2565b46e756fSKirill A. Shutemov 	}
2566bd3400eaSLiangcai Fan 
2567bd3400eaSLiangcai Fan update_wmarks:
2568b46e756fSKirill A. Shutemov 	setup_per_zone_wmarks();
2569b46e756fSKirill A. Shutemov }
2570b46e756fSKirill A. Shutemov 
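/* Start or stop the khugepaged thread to match hugepage_flags_enabled(). */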
2571b46e756fSKirill A. Shutemov int start_stop_khugepaged(void)
2572b46e756fSKirill A. Shutemov {
2573b46e756fSKirill A. Shutemov 	int err = 0;
2574b46e756fSKirill A. Shutemov 
2575b46e756fSKirill A. Shutemov 	mutex_lock(&khugepaged_mutex);
25761064026bSYang Shi 	if (hugepage_flags_enabled()) {
2577b46e756fSKirill A. Shutemov 		if (!khugepaged_thread)
2578b46e756fSKirill A. Shutemov 			khugepaged_thread = kthread_run(khugepaged, NULL,
2579b46e756fSKirill A. Shutemov 							"khugepaged");
2580b46e756fSKirill A. Shutemov 		if (IS_ERR(khugepaged_thread)) {
2581b46e756fSKirill A. Shutemov 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2582b46e756fSKirill A. Shutemov 			err = PTR_ERR(khugepaged_thread);
2583b46e756fSKirill A. Shutemov 			khugepaged_thread = NULL;
2584b46e756fSKirill A. Shutemov 			goto fail;
2585b46e756fSKirill A. Shutemov 		}
2586b46e756fSKirill A. Shutemov 
2587b46e756fSKirill A. Shutemov 		if (!list_empty(&khugepaged_scan.mm_head))
2588b46e756fSKirill A. Shutemov 			wake_up_interruptible(&khugepaged_wait);
2589b46e756fSKirill A. Shutemov 	} else if (khugepaged_thread) {
2590b46e756fSKirill A. Shutemov 		kthread_stop(khugepaged_thread);
2591b46e756fSKirill A. Shutemov 		khugepaged_thread = NULL;
2592b46e756fSKirill A. Shutemov 	}
2593bd3400eaSLiangcai Fan 	set_recommended_min_free_kbytes();
2594b46e756fSKirill A. Shutemov fail:
2595b46e756fSKirill A. Shutemov 	mutex_unlock(&khugepaged_mutex);
2596b46e756fSKirill A. Shutemov 	return err;
2597b46e756fSKirill A. Shutemov }
25984aab2be0SVijay Balakrishna 
25994aab2be0SVijay Balakrishna void khugepaged_min_free_kbytes_update(void)
26004aab2be0SVijay Balakrishna {
26014aab2be0SVijay Balakrishna 	mutex_lock(&khugepaged_mutex);
26021064026bSYang Shi 	if (hugepage_flags_enabled() && khugepaged_thread)
26034aab2be0SVijay Balakrishna 		set_recommended_min_free_kbytes();
26044aab2be0SVijay Balakrishna 	mutex_unlock(&khugepaged_mutex);
26054aab2be0SVijay Balakrishna }
26067d8faaf1SZach O'Keefe 
260757e9cc50SJohannes Weiner bool current_is_khugepaged(void)
260857e9cc50SJohannes Weiner {
260957e9cc50SJohannes Weiner 	return kthread_func(current) == khugepaged;
261057e9cc50SJohannes Weiner }
261157e9cc50SJohannes Weiner 
26127d8faaf1SZach O'Keefe static int madvise_collapse_errno(enum scan_result r)
26137d8faaf1SZach O'Keefe {
26147d8faaf1SZach O'Keefe 	/*
26157d8faaf1SZach O'Keefe 	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
26167d8faaf1SZach O'Keefe 	 * actionable feedback to caller, so they may take an appropriate
26177d8faaf1SZach O'Keefe 	 * fallback measure depending on the nature of the failure.
26187d8faaf1SZach O'Keefe 	 */
26197d8faaf1SZach O'Keefe 	switch (r) {
26207d8faaf1SZach O'Keefe 	case SCAN_ALLOC_HUGE_PAGE_FAIL:
26217d8faaf1SZach O'Keefe 		return -ENOMEM;
26227d8faaf1SZach O'Keefe 	case SCAN_CGROUP_CHARGE_FAIL:
26237d8faaf1SZach O'Keefe 		return -EBUSY;
26247d8faaf1SZach O'Keefe 	/* Resource temporarily unavailable - trying again might succeed */
26257d8faaf1SZach O'Keefe 	case SCAN_PAGE_LOCK:
26267d8faaf1SZach O'Keefe 	case SCAN_PAGE_LRU:
26270f3e2a2cSZach O'Keefe 	case SCAN_DEL_PAGE_LRU:
26287d8faaf1SZach O'Keefe 		return -EAGAIN;
26297d8faaf1SZach O'Keefe 	/*
26307d8faaf1SZach O'Keefe 	 * Other: Trying again likely not to succeed / error intrinsic to
26317d8faaf1SZach O'Keefe 	 * specified memory range. khugepaged likely won't be able to collapse
26327d8faaf1SZach O'Keefe 	 * either.
26337d8faaf1SZach O'Keefe 	 */
26347d8faaf1SZach O'Keefe 	default:
26357d8faaf1SZach O'Keefe 		return -EINVAL;
26367d8faaf1SZach O'Keefe 	}
26377d8faaf1SZach O'Keefe }
26387d8faaf1SZach O'Keefe 
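/*
 * Synchronously collapse [start, end) for MADV_COLLAPSE.  Returns 0 only if
 * every PMD-sized subrange ends up backed by a hugepage, otherwise an errno
 * derived from the last failure.
 */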
26397d8faaf1SZach O'Keefe int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
26407d8faaf1SZach O'Keefe 		     unsigned long start, unsigned long end)
26417d8faaf1SZach O'Keefe {
26427d8faaf1SZach O'Keefe 	struct collapse_control *cc;
26437d8faaf1SZach O'Keefe 	struct mm_struct *mm = vma->vm_mm;
26447d8faaf1SZach O'Keefe 	unsigned long hstart, hend, addr;
26457d8faaf1SZach O'Keefe 	int thps = 0, last_fail = SCAN_FAIL;
26467d8faaf1SZach O'Keefe 	bool mmap_locked = true;
26477d8faaf1SZach O'Keefe 
26487d8faaf1SZach O'Keefe 	BUG_ON(vma->vm_start > start);
26497d8faaf1SZach O'Keefe 	BUG_ON(vma->vm_end < end);
26507d8faaf1SZach O'Keefe 
26517d8faaf1SZach O'Keefe 	*prev = vma;
26527d8faaf1SZach O'Keefe 
26537d8faaf1SZach O'Keefe 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
26547d8faaf1SZach O'Keefe 		return -EINVAL;
26557d8faaf1SZach O'Keefe 
26567d8faaf1SZach O'Keefe 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
26577d8faaf1SZach O'Keefe 	if (!cc)
26587d8faaf1SZach O'Keefe 		return -ENOMEM;
26597d8faaf1SZach O'Keefe 	cc->is_khugepaged = false;
26607d8faaf1SZach O'Keefe 
26617d8faaf1SZach O'Keefe 	mmgrab(mm);
26627d8faaf1SZach O'Keefe 	lru_add_drain_all();
26637d8faaf1SZach O'Keefe 
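	/* Only fully PMD-aligned, PMD-sized subranges can be collapsed. */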
26647d8faaf1SZach O'Keefe 	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
26657d8faaf1SZach O'Keefe 	hend = end & HPAGE_PMD_MASK;
26667d8faaf1SZach O'Keefe 
26677d8faaf1SZach O'Keefe 	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
26687d8faaf1SZach O'Keefe 		int result = SCAN_FAIL;
26697d8faaf1SZach O'Keefe 
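		/*
		 * The previous iteration dropped mmap_lock: re-take it and
		 * revalidate the VMA before continuing.
		 */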
26707d8faaf1SZach O'Keefe 		if (!mmap_locked) {
26717d8faaf1SZach O'Keefe 			cond_resched();
26727d8faaf1SZach O'Keefe 			mmap_read_lock(mm);
26737d8faaf1SZach O'Keefe 			mmap_locked = true;
267434488399SZach O'Keefe 			result = hugepage_vma_revalidate(mm, addr, false, &vma,
267534488399SZach O'Keefe 							 cc);
26767d8faaf1SZach O'Keefe 			if (result != SCAN_SUCCEED) {
26777d8faaf1SZach O'Keefe 				last_fail = result;
26787d8faaf1SZach O'Keefe 				goto out_nolock;
26797d8faaf1SZach O'Keefe 			}
26804d24de94SYang Shi 
268152dc0310SZach O'Keefe 			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
26827d8faaf1SZach O'Keefe 		}
26837d8faaf1SZach O'Keefe 		mmap_assert_locked(mm);
26847d8faaf1SZach O'Keefe 		memset(cc->node_load, 0, sizeof(cc->node_load));
2685e031ff96SYang Shi 		nodes_clear(cc->alloc_nmask);
268634488399SZach O'Keefe 		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
268734488399SZach O'Keefe 			struct file *file = get_file(vma->vm_file);
268834488399SZach O'Keefe 			pgoff_t pgoff = linear_page_index(vma, addr);
268934488399SZach O'Keefe 
269034488399SZach O'Keefe 			mmap_read_unlock(mm);
269134488399SZach O'Keefe 			mmap_locked = false;
269234488399SZach O'Keefe 			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
26937d2c4385SZach O'Keefe 							  cc);
269434488399SZach O'Keefe 			fput(file);
269534488399SZach O'Keefe 		} else {
269634488399SZach O'Keefe 			result = hpage_collapse_scan_pmd(mm, vma, addr,
269734488399SZach O'Keefe 							 &mmap_locked, cc);
269834488399SZach O'Keefe 		}
26997d8faaf1SZach O'Keefe 		if (!mmap_locked)
27007d8faaf1SZach O'Keefe 			*prev = NULL;  /* Tell caller we dropped mmap_lock */
27017d8faaf1SZach O'Keefe 
270234488399SZach O'Keefe handle_result:
27037d8faaf1SZach O'Keefe 		switch (result) {
27047d8faaf1SZach O'Keefe 		case SCAN_SUCCEED:
27057d8faaf1SZach O'Keefe 		case SCAN_PMD_MAPPED:
27067d8faaf1SZach O'Keefe 			++thps;
27077d8faaf1SZach O'Keefe 			break;
270834488399SZach O'Keefe 		case SCAN_PTE_MAPPED_HUGEPAGE:
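			/*
			 * The range is already a hugepage in the page cache,
			 * merely PTE-mapped: retract the page table under the
			 * mmap write lock and re-evaluate the result.
			 */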
270934488399SZach O'Keefe 			BUG_ON(mmap_locked);
271034488399SZach O'Keefe 			BUG_ON(*prev);
271134488399SZach O'Keefe 			mmap_write_lock(mm);
271234488399SZach O'Keefe 			result = collapse_pte_mapped_thp(mm, addr, true);
271334488399SZach O'Keefe 			mmap_write_unlock(mm);
271434488399SZach O'Keefe 			goto handle_result;
27157d8faaf1SZach O'Keefe 		/* Whitelisted set of results where continuing is OK */
27167d8faaf1SZach O'Keefe 		case SCAN_PMD_NULL:
27177d8faaf1SZach O'Keefe 		case SCAN_PTE_NON_PRESENT:
27187d8faaf1SZach O'Keefe 		case SCAN_PTE_UFFD_WP:
27197d8faaf1SZach O'Keefe 		case SCAN_PAGE_RO:
27207d8faaf1SZach O'Keefe 		case SCAN_LACK_REFERENCED_PAGE:
27217d8faaf1SZach O'Keefe 		case SCAN_PAGE_NULL:
27227d8faaf1SZach O'Keefe 		case SCAN_PAGE_COUNT:
27237d8faaf1SZach O'Keefe 		case SCAN_PAGE_LOCK:
27247d8faaf1SZach O'Keefe 		case SCAN_PAGE_COMPOUND:
27257d8faaf1SZach O'Keefe 		case SCAN_PAGE_LRU:
27260f3e2a2cSZach O'Keefe 		case SCAN_DEL_PAGE_LRU:
27277d8faaf1SZach O'Keefe 			last_fail = result;
27287d8faaf1SZach O'Keefe 			break;
27297d8faaf1SZach O'Keefe 		default:
27307d8faaf1SZach O'Keefe 			last_fail = result;
27317d8faaf1SZach O'Keefe 			/* Other error, exit */
27327d8faaf1SZach O'Keefe 			goto out_maybelock;
27337d8faaf1SZach O'Keefe 		}
27347d8faaf1SZach O'Keefe 	}
27357d8faaf1SZach O'Keefe 
27367d8faaf1SZach O'Keefe out_maybelock:
27377d8faaf1SZach O'Keefe 	/* Caller expects us to hold mmap_lock on return */
27387d8faaf1SZach O'Keefe 	if (!mmap_locked)
27397d8faaf1SZach O'Keefe 		mmap_read_lock(mm);
27407d8faaf1SZach O'Keefe out_nolock:
27417d8faaf1SZach O'Keefe 	mmap_assert_locked(mm);
27427d8faaf1SZach O'Keefe 	mmdrop(mm);
27437d8faaf1SZach O'Keefe 	kfree(cc);
27447d8faaf1SZach O'Keefe 
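	/* Success only if every PMD range in [hstart, hend) is THP-backed. */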
27457d8faaf1SZach O'Keefe 	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
27467d8faaf1SZach O'Keefe 			: madvise_collapse_errno(last_fail);
27477d8faaf1SZach O'Keefe }
2748