xref: /linux/mm/khugepaged.c (revision 100c85421b52e41269ada88f7d71a6b8a06c7a11)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2b46e756fSKirill A. Shutemov #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3b46e756fSKirill A. Shutemov 
4b46e756fSKirill A. Shutemov #include <linux/mm.h>
5b46e756fSKirill A. Shutemov #include <linux/sched.h>
66e84f315SIngo Molnar #include <linux/sched/mm.h>
7f7ccbae4SIngo Molnar #include <linux/sched/coredump.h>
8b46e756fSKirill A. Shutemov #include <linux/mmu_notifier.h>
9b46e756fSKirill A. Shutemov #include <linux/rmap.h>
10b46e756fSKirill A. Shutemov #include <linux/swap.h>
11b46e756fSKirill A. Shutemov #include <linux/mm_inline.h>
12b46e756fSKirill A. Shutemov #include <linux/kthread.h>
13b46e756fSKirill A. Shutemov #include <linux/khugepaged.h>
14b46e756fSKirill A. Shutemov #include <linux/freezer.h>
15b46e756fSKirill A. Shutemov #include <linux/mman.h>
16b46e756fSKirill A. Shutemov #include <linux/hashtable.h>
17b46e756fSKirill A. Shutemov #include <linux/userfaultfd_k.h>
18b46e756fSKirill A. Shutemov #include <linux/page_idle.h>
1980110bbfSPasha Tatashin #include <linux/page_table_check.h>
201e2f2d31SKent Overstreet #include <linux/rcupdate_wait.h>
21b46e756fSKirill A. Shutemov #include <linux/swapops.h>
22f3f0e1d2SKirill A. Shutemov #include <linux/shmem_fs.h>
23e2942062Sxu xin #include <linux/ksm.h>
24b46e756fSKirill A. Shutemov 
25b46e756fSKirill A. Shutemov #include <asm/tlb.h>
26b46e756fSKirill A. Shutemov #include <asm/pgalloc.h>
27b46e756fSKirill A. Shutemov #include "internal.h"
28b26e2701SQi Zheng #include "mm_slot.h"
29b46e756fSKirill A. Shutemov 
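/*
 * Result codes for the scan and collapse paths below. SCAN_SUCCEED means the
 * range was (or can be) collapsed into a hugepage; every other value records
 * why a range was skipped, and is reported through the huge_memory
 * tracepoints.
 */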
30b46e756fSKirill A. Shutemov enum scan_result {
31b46e756fSKirill A. Shutemov 	SCAN_FAIL,
32b46e756fSKirill A. Shutemov 	SCAN_SUCCEED,
33b46e756fSKirill A. Shutemov 	SCAN_PMD_NULL,
3434488399SZach O'Keefe 	SCAN_PMD_NONE,
3550722804SZach O'Keefe 	SCAN_PMD_MAPPED,
36b46e756fSKirill A. Shutemov 	SCAN_EXCEED_NONE_PTE,
3771a2c112SKirill A. Shutemov 	SCAN_EXCEED_SWAP_PTE,
3871a2c112SKirill A. Shutemov 	SCAN_EXCEED_SHARED_PTE,
39b46e756fSKirill A. Shutemov 	SCAN_PTE_NON_PRESENT,
40e1e267c7SPeter Xu 	SCAN_PTE_UFFD_WP,
4158ac9a89SZach O'Keefe 	SCAN_PTE_MAPPED_HUGEPAGE,
42b46e756fSKirill A. Shutemov 	SCAN_PAGE_RO,
430db501f7SEbru Akagunduz 	SCAN_LACK_REFERENCED_PAGE,
44b46e756fSKirill A. Shutemov 	SCAN_PAGE_NULL,
45b46e756fSKirill A. Shutemov 	SCAN_SCAN_ABORT,
46b46e756fSKirill A. Shutemov 	SCAN_PAGE_COUNT,
47b46e756fSKirill A. Shutemov 	SCAN_PAGE_LRU,
48b46e756fSKirill A. Shutemov 	SCAN_PAGE_LOCK,
49b46e756fSKirill A. Shutemov 	SCAN_PAGE_ANON,
50b46e756fSKirill A. Shutemov 	SCAN_PAGE_COMPOUND,
51b46e756fSKirill A. Shutemov 	SCAN_ANY_PROCESS,
52b46e756fSKirill A. Shutemov 	SCAN_VMA_NULL,
53b46e756fSKirill A. Shutemov 	SCAN_VMA_CHECK,
54b46e756fSKirill A. Shutemov 	SCAN_ADDRESS_RANGE,
55b46e756fSKirill A. Shutemov 	SCAN_DEL_PAGE_LRU,
56b46e756fSKirill A. Shutemov 	SCAN_ALLOC_HUGE_PAGE_FAIL,
57b46e756fSKirill A. Shutemov 	SCAN_CGROUP_CHARGE_FAIL,
58f3f0e1d2SKirill A. Shutemov 	SCAN_TRUNCATED,
5999cb0dbdSSong Liu 	SCAN_PAGE_HAS_PRIVATE,
602ce0bdfeSIvan Orlov 	SCAN_STORE_FAILED,
6198c76c9fSJiaqi Yan 	SCAN_COPY_MC,
62ac492b9cSDavid Stevens 	SCAN_PAGE_FILLED,
63b46e756fSKirill A. Shutemov };
64b46e756fSKirill A. Shutemov 
65b46e756fSKirill A. Shutemov #define CREATE_TRACE_POINTS
66b46e756fSKirill A. Shutemov #include <trace/events/huge_memory.h>
67b46e756fSKirill A. Shutemov 
684aab2be0SVijay Balakrishna static struct task_struct *khugepaged_thread __read_mostly;
694aab2be0SVijay Balakrishna static DEFINE_MUTEX(khugepaged_mutex);
704aab2be0SVijay Balakrishna 
71b46e756fSKirill A. Shutemov /* default scan 8*512 ptes (or vmas) every 30 seconds */
72b46e756fSKirill A. Shutemov static unsigned int khugepaged_pages_to_scan __read_mostly;
73b46e756fSKirill A. Shutemov static unsigned int khugepaged_pages_collapsed;
74b46e756fSKirill A. Shutemov static unsigned int khugepaged_full_scans;
75b46e756fSKirill A. Shutemov static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
76b46e756fSKirill A. Shutemov /* during fragmentation poll the hugepage allocator once every minute */
77b46e756fSKirill A. Shutemov static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
78b46e756fSKirill A. Shutemov static unsigned long khugepaged_sleep_expire;
79b46e756fSKirill A. Shutemov static DEFINE_SPINLOCK(khugepaged_mm_lock);
80b46e756fSKirill A. Shutemov static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
81b46e756fSKirill A. Shutemov /*
82b46e756fSKirill A. Shutemov  * By default, collapse hugepages if there is at least one pte mapped as
83b46e756fSKirill A. Shutemov  * it would have been had the vma been large enough during the page
84b46e756fSKirill A. Shutemov  * fault.
85d8ea7cc8SZach O'Keefe  *
86d8ea7cc8SZach O'Keefe  * Note that these are only respected if collapse was initiated by khugepaged.
87b46e756fSKirill A. Shutemov  */
88b46e756fSKirill A. Shutemov static unsigned int khugepaged_max_ptes_none __read_mostly;
89b46e756fSKirill A. Shutemov static unsigned int khugepaged_max_ptes_swap __read_mostly;
9071a2c112SKirill A. Shutemov static unsigned int khugepaged_max_ptes_shared __read_mostly;
91b46e756fSKirill A. Shutemov 
92b46e756fSKirill A. Shutemov #define MM_SLOTS_HASH_BITS 10
93e1ad3e66SNick Desaulniers static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
94b46e756fSKirill A. Shutemov 
9568279f9cSAlexey Dobriyan static struct kmem_cache *mm_slot_cache __ro_after_init;
96b46e756fSKirill A. Shutemov 
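/*
 * Per-call state for one collapse attempt. khugepaged uses the global
 * khugepaged_collapse_control defined below; other callers (e.g.
 * MADV_COLLAPSE) pass their own with is_khugepaged == false, which bypasses
 * the khugepaged_max_ptes_* limits.
 */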
9734d6b470SZach O'Keefe struct collapse_control {
98d8ea7cc8SZach O'Keefe 	bool is_khugepaged;
99d8ea7cc8SZach O'Keefe 
10034d6b470SZach O'Keefe 	/* Num pages scanned per node */
10134d6b470SZach O'Keefe 	u32 node_load[MAX_NUMNODES];
10234d6b470SZach O'Keefe 
103e031ff96SYang Shi 	/* nodemask for allocation fallback */
104e031ff96SYang Shi 	nodemask_t alloc_nmask;
10534d6b470SZach O'Keefe };
10634d6b470SZach O'Keefe 
107b46e756fSKirill A. Shutemov /**
108b26e2701SQi Zheng  * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
109b26e2701SQi Zheng  * @slot: hash lookup from mm to mm_slot
110b46e756fSKirill A. Shutemov  */
111b26e2701SQi Zheng struct khugepaged_mm_slot {
112b26e2701SQi Zheng 	struct mm_slot slot;
113b46e756fSKirill A. Shutemov };
114b46e756fSKirill A. Shutemov 
115b46e756fSKirill A. Shutemov /**
116b46e756fSKirill A. Shutemov  * struct khugepaged_scan - cursor for scanning
117b46e756fSKirill A. Shutemov  * @mm_head: the head of the mm list to scan
118b46e756fSKirill A. Shutemov  * @mm_slot: the current mm_slot we are scanning
119b46e756fSKirill A. Shutemov  * @address: the next address inside that to be scanned
120b46e756fSKirill A. Shutemov  *
121b46e756fSKirill A. Shutemov  * There is only the one khugepaged_scan instance of this cursor structure.
122b46e756fSKirill A. Shutemov  */
123b46e756fSKirill A. Shutemov struct khugepaged_scan {
124b46e756fSKirill A. Shutemov 	struct list_head mm_head;
125b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
126b46e756fSKirill A. Shutemov 	unsigned long address;
127b46e756fSKirill A. Shutemov };
128b46e756fSKirill A. Shutemov 
129b46e756fSKirill A. Shutemov static struct khugepaged_scan khugepaged_scan = {
130b46e756fSKirill A. Shutemov 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
131b46e756fSKirill A. Shutemov };
132b46e756fSKirill A. Shutemov 
133e1465d12SJérémy Lefaure #ifdef CONFIG_SYSFS
134b46e756fSKirill A. Shutemov static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
135b46e756fSKirill A. Shutemov 					 struct kobj_attribute *attr,
136b46e756fSKirill A. Shutemov 					 char *buf)
137b46e756fSKirill A. Shutemov {
138ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
139b46e756fSKirill A. Shutemov }
140b46e756fSKirill A. Shutemov 
141b46e756fSKirill A. Shutemov static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
142b46e756fSKirill A. Shutemov 					  struct kobj_attribute *attr,
143b46e756fSKirill A. Shutemov 					  const char *buf, size_t count)
144b46e756fSKirill A. Shutemov {
145dfefd226SAlexey Dobriyan 	unsigned int msecs;
146b46e756fSKirill A. Shutemov 	int err;
147b46e756fSKirill A. Shutemov 
148dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &msecs);
149dfefd226SAlexey Dobriyan 	if (err)
150b46e756fSKirill A. Shutemov 		return -EINVAL;
151b46e756fSKirill A. Shutemov 
152b46e756fSKirill A. Shutemov 	khugepaged_scan_sleep_millisecs = msecs;
153b46e756fSKirill A. Shutemov 	khugepaged_sleep_expire = 0;
154b46e756fSKirill A. Shutemov 	wake_up_interruptible(&khugepaged_wait);
155b46e756fSKirill A. Shutemov 
156b46e756fSKirill A. Shutemov 	return count;
157b46e756fSKirill A. Shutemov }
158b46e756fSKirill A. Shutemov static struct kobj_attribute scan_sleep_millisecs_attr =
1596dcdc94dSMiaohe Lin 	__ATTR_RW(scan_sleep_millisecs);
160b46e756fSKirill A. Shutemov 
161b46e756fSKirill A. Shutemov static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
162b46e756fSKirill A. Shutemov 					  struct kobj_attribute *attr,
163b46e756fSKirill A. Shutemov 					  char *buf)
164b46e756fSKirill A. Shutemov {
165ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
166b46e756fSKirill A. Shutemov }
167b46e756fSKirill A. Shutemov 
168b46e756fSKirill A. Shutemov static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
169b46e756fSKirill A. Shutemov 					   struct kobj_attribute *attr,
170b46e756fSKirill A. Shutemov 					   const char *buf, size_t count)
171b46e756fSKirill A. Shutemov {
172dfefd226SAlexey Dobriyan 	unsigned int msecs;
173b46e756fSKirill A. Shutemov 	int err;
174b46e756fSKirill A. Shutemov 
175dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &msecs);
176dfefd226SAlexey Dobriyan 	if (err)
177b46e756fSKirill A. Shutemov 		return -EINVAL;
178b46e756fSKirill A. Shutemov 
179b46e756fSKirill A. Shutemov 	khugepaged_alloc_sleep_millisecs = msecs;
180b46e756fSKirill A. Shutemov 	khugepaged_sleep_expire = 0;
181b46e756fSKirill A. Shutemov 	wake_up_interruptible(&khugepaged_wait);
182b46e756fSKirill A. Shutemov 
183b46e756fSKirill A. Shutemov 	return count;
184b46e756fSKirill A. Shutemov }
185b46e756fSKirill A. Shutemov static struct kobj_attribute alloc_sleep_millisecs_attr =
1866dcdc94dSMiaohe Lin 	__ATTR_RW(alloc_sleep_millisecs);
187b46e756fSKirill A. Shutemov 
188b46e756fSKirill A. Shutemov static ssize_t pages_to_scan_show(struct kobject *kobj,
189b46e756fSKirill A. Shutemov 				  struct kobj_attribute *attr,
190b46e756fSKirill A. Shutemov 				  char *buf)
191b46e756fSKirill A. Shutemov {
192ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
193b46e756fSKirill A. Shutemov }
194b46e756fSKirill A. Shutemov static ssize_t pages_to_scan_store(struct kobject *kobj,
195b46e756fSKirill A. Shutemov 				   struct kobj_attribute *attr,
196b46e756fSKirill A. Shutemov 				   const char *buf, size_t count)
197b46e756fSKirill A. Shutemov {
198dfefd226SAlexey Dobriyan 	unsigned int pages;
199b46e756fSKirill A. Shutemov 	int err;
200b46e756fSKirill A. Shutemov 
201dfefd226SAlexey Dobriyan 	err = kstrtouint(buf, 10, &pages);
202dfefd226SAlexey Dobriyan 	if (err || !pages)
203b46e756fSKirill A. Shutemov 		return -EINVAL;
204b46e756fSKirill A. Shutemov 
205b46e756fSKirill A. Shutemov 	khugepaged_pages_to_scan = pages;
206b46e756fSKirill A. Shutemov 
207b46e756fSKirill A. Shutemov 	return count;
208b46e756fSKirill A. Shutemov }
209b46e756fSKirill A. Shutemov static struct kobj_attribute pages_to_scan_attr =
2106dcdc94dSMiaohe Lin 	__ATTR_RW(pages_to_scan);
211b46e756fSKirill A. Shutemov 
212b46e756fSKirill A. Shutemov static ssize_t pages_collapsed_show(struct kobject *kobj,
213b46e756fSKirill A. Shutemov 				    struct kobj_attribute *attr,
214b46e756fSKirill A. Shutemov 				    char *buf)
215b46e756fSKirill A. Shutemov {
216ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
217b46e756fSKirill A. Shutemov }
218b46e756fSKirill A. Shutemov static struct kobj_attribute pages_collapsed_attr =
219b46e756fSKirill A. Shutemov 	__ATTR_RO(pages_collapsed);
220b46e756fSKirill A. Shutemov 
221b46e756fSKirill A. Shutemov static ssize_t full_scans_show(struct kobject *kobj,
222b46e756fSKirill A. Shutemov 			       struct kobj_attribute *attr,
223b46e756fSKirill A. Shutemov 			       char *buf)
224b46e756fSKirill A. Shutemov {
225ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
226b46e756fSKirill A. Shutemov }
227b46e756fSKirill A. Shutemov static struct kobj_attribute full_scans_attr =
228b46e756fSKirill A. Shutemov 	__ATTR_RO(full_scans);
229b46e756fSKirill A. Shutemov 
2306dcdc94dSMiaohe Lin static ssize_t defrag_show(struct kobject *kobj,
231b46e756fSKirill A. Shutemov 			   struct kobj_attribute *attr, char *buf)
232b46e756fSKirill A. Shutemov {
233b46e756fSKirill A. Shutemov 	return single_hugepage_flag_show(kobj, attr, buf,
234b46e756fSKirill A. Shutemov 					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
235b46e756fSKirill A. Shutemov }
2366dcdc94dSMiaohe Lin static ssize_t defrag_store(struct kobject *kobj,
237b46e756fSKirill A. Shutemov 			    struct kobj_attribute *attr,
238b46e756fSKirill A. Shutemov 			    const char *buf, size_t count)
239b46e756fSKirill A. Shutemov {
240b46e756fSKirill A. Shutemov 	return single_hugepage_flag_store(kobj, attr, buf, count,
241b46e756fSKirill A. Shutemov 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
242b46e756fSKirill A. Shutemov }
243b46e756fSKirill A. Shutemov static struct kobj_attribute khugepaged_defrag_attr =
2446dcdc94dSMiaohe Lin 	__ATTR_RW(defrag);
245b46e756fSKirill A. Shutemov 
246b46e756fSKirill A. Shutemov /*
247b46e756fSKirill A. Shutemov  * max_ptes_none controls whether khugepaged should collapse hugepages over
248b46e756fSKirill A. Shutemov  * any unmapped ptes, in turn potentially increasing the memory
249b46e756fSKirill A. Shutemov  * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
250b46e756fSKirill A. Shutemov  * reduce the available free memory in the system as it
251b46e756fSKirill A. Shutemov  * runs. Increasing max_ptes_none will instead potentially reduce the
252b46e756fSKirill A. Shutemov  * free memory in the system during the khugepaged scan.
253b46e756fSKirill A. Shutemov  */
2546dcdc94dSMiaohe Lin static ssize_t max_ptes_none_show(struct kobject *kobj,
255b46e756fSKirill A. Shutemov 				  struct kobj_attribute *attr,
256b46e756fSKirill A. Shutemov 				  char *buf)
257b46e756fSKirill A. Shutemov {
258ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
259b46e756fSKirill A. Shutemov }
2606dcdc94dSMiaohe Lin static ssize_t max_ptes_none_store(struct kobject *kobj,
261b46e756fSKirill A. Shutemov 				   struct kobj_attribute *attr,
262b46e756fSKirill A. Shutemov 				   const char *buf, size_t count)
263b46e756fSKirill A. Shutemov {
264b46e756fSKirill A. Shutemov 	int err;
265b46e756fSKirill A. Shutemov 	unsigned long max_ptes_none;
266b46e756fSKirill A. Shutemov 
267b46e756fSKirill A. Shutemov 	err = kstrtoul(buf, 10, &max_ptes_none);
268b46e756fSKirill A. Shutemov 	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
269b46e756fSKirill A. Shutemov 		return -EINVAL;
270b46e756fSKirill A. Shutemov 
271b46e756fSKirill A. Shutemov 	khugepaged_max_ptes_none = max_ptes_none;
272b46e756fSKirill A. Shutemov 
273b46e756fSKirill A. Shutemov 	return count;
274b46e756fSKirill A. Shutemov }
275b46e756fSKirill A. Shutemov static struct kobj_attribute khugepaged_max_ptes_none_attr =
2766dcdc94dSMiaohe Lin 	__ATTR_RW(max_ptes_none);
277b46e756fSKirill A. Shutemov 
2786dcdc94dSMiaohe Lin static ssize_t max_ptes_swap_show(struct kobject *kobj,
279b46e756fSKirill A. Shutemov 				  struct kobj_attribute *attr,
280b46e756fSKirill A. Shutemov 				  char *buf)
281b46e756fSKirill A. Shutemov {
282ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
283b46e756fSKirill A. Shutemov }
284b46e756fSKirill A. Shutemov 
2856dcdc94dSMiaohe Lin static ssize_t max_ptes_swap_store(struct kobject *kobj,
286b46e756fSKirill A. Shutemov 				   struct kobj_attribute *attr,
287b46e756fSKirill A. Shutemov 				   const char *buf, size_t count)
288b46e756fSKirill A. Shutemov {
289b46e756fSKirill A. Shutemov 	int err;
290b46e756fSKirill A. Shutemov 	unsigned long max_ptes_swap;
291b46e756fSKirill A. Shutemov 
292b46e756fSKirill A. Shutemov 	err  = kstrtoul(buf, 10, &max_ptes_swap);
293b46e756fSKirill A. Shutemov 	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
294b46e756fSKirill A. Shutemov 		return -EINVAL;
295b46e756fSKirill A. Shutemov 
296b46e756fSKirill A. Shutemov 	khugepaged_max_ptes_swap = max_ptes_swap;
297b46e756fSKirill A. Shutemov 
298b46e756fSKirill A. Shutemov 	return count;
299b46e756fSKirill A. Shutemov }
300b46e756fSKirill A. Shutemov 
301b46e756fSKirill A. Shutemov static struct kobj_attribute khugepaged_max_ptes_swap_attr =
3026dcdc94dSMiaohe Lin 	__ATTR_RW(max_ptes_swap);
303b46e756fSKirill A. Shutemov 
3046dcdc94dSMiaohe Lin static ssize_t max_ptes_shared_show(struct kobject *kobj,
30571a2c112SKirill A. Shutemov 				    struct kobj_attribute *attr,
30671a2c112SKirill A. Shutemov 				    char *buf)
30771a2c112SKirill A. Shutemov {
308ae7a927dSJoe Perches 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
30971a2c112SKirill A. Shutemov }
31071a2c112SKirill A. Shutemov 
3116dcdc94dSMiaohe Lin static ssize_t max_ptes_shared_store(struct kobject *kobj,
31271a2c112SKirill A. Shutemov 				     struct kobj_attribute *attr,
31371a2c112SKirill A. Shutemov 				     const char *buf, size_t count)
31471a2c112SKirill A. Shutemov {
31571a2c112SKirill A. Shutemov 	int err;
31671a2c112SKirill A. Shutemov 	unsigned long max_ptes_shared;
31771a2c112SKirill A. Shutemov 
31871a2c112SKirill A. Shutemov 	err  = kstrtoul(buf, 10, &max_ptes_shared);
31971a2c112SKirill A. Shutemov 	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
32071a2c112SKirill A. Shutemov 		return -EINVAL;
32171a2c112SKirill A. Shutemov 
32271a2c112SKirill A. Shutemov 	khugepaged_max_ptes_shared = max_ptes_shared;
32371a2c112SKirill A. Shutemov 
32471a2c112SKirill A. Shutemov 	return count;
32571a2c112SKirill A. Shutemov }
32671a2c112SKirill A. Shutemov 
32771a2c112SKirill A. Shutemov static struct kobj_attribute khugepaged_max_ptes_shared_attr =
3286dcdc94dSMiaohe Lin 	__ATTR_RW(max_ptes_shared);
32971a2c112SKirill A. Shutemov 
330b46e756fSKirill A. Shutemov static struct attribute *khugepaged_attr[] = {
331b46e756fSKirill A. Shutemov 	&khugepaged_defrag_attr.attr,
332b46e756fSKirill A. Shutemov 	&khugepaged_max_ptes_none_attr.attr,
33371a2c112SKirill A. Shutemov 	&khugepaged_max_ptes_swap_attr.attr,
33471a2c112SKirill A. Shutemov 	&khugepaged_max_ptes_shared_attr.attr,
335b46e756fSKirill A. Shutemov 	&pages_to_scan_attr.attr,
336b46e756fSKirill A. Shutemov 	&pages_collapsed_attr.attr,
337b46e756fSKirill A. Shutemov 	&full_scans_attr.attr,
338b46e756fSKirill A. Shutemov 	&scan_sleep_millisecs_attr.attr,
339b46e756fSKirill A. Shutemov 	&alloc_sleep_millisecs_attr.attr,
340b46e756fSKirill A. Shutemov 	NULL,
341b46e756fSKirill A. Shutemov };
342b46e756fSKirill A. Shutemov 
343b46e756fSKirill A. Shutemov struct attribute_group khugepaged_attr_group = {
344b46e756fSKirill A. Shutemov 	.attrs = khugepaged_attr,
345b46e756fSKirill A. Shutemov 	.name = "khugepaged",
346b46e756fSKirill A. Shutemov };
347e1465d12SJérémy Lefaure #endif /* CONFIG_SYSFS */
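/*
 * Illustrative only: with CONFIG_SYSFS the attributes above are exposed
 * under /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.
 *
 *	echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *	cat /sys/kernel/mm/transparent_hugepage/khugepaged/full_scans
 */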
348b46e756fSKirill A. Shutemov 
349b46e756fSKirill A. Shutemov int hugepage_madvise(struct vm_area_struct *vma,
350b46e756fSKirill A. Shutemov 		     unsigned long *vm_flags, int advice)
351b46e756fSKirill A. Shutemov {
352b46e756fSKirill A. Shutemov 	switch (advice) {
353b46e756fSKirill A. Shutemov 	case MADV_HUGEPAGE:
354b46e756fSKirill A. Shutemov #ifdef CONFIG_S390
355b46e756fSKirill A. Shutemov 		/*
356b46e756fSKirill A. Shutemov 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
357b46e756fSKirill A. Shutemov 		 * can't handle this properly after s390_enable_sie, so we simply
358b46e756fSKirill A. Shutemov 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
359b46e756fSKirill A. Shutemov 		 */
360b46e756fSKirill A. Shutemov 		if (mm_has_pgste(vma->vm_mm))
361b46e756fSKirill A. Shutemov 			return 0;
362b46e756fSKirill A. Shutemov #endif
363b46e756fSKirill A. Shutemov 		*vm_flags &= ~VM_NOHUGEPAGE;
364b46e756fSKirill A. Shutemov 		*vm_flags |= VM_HUGEPAGE;
365b46e756fSKirill A. Shutemov 		/*
366b46e756fSKirill A. Shutemov 		 * If the vma becomes good for khugepaged to scan,
367b46e756fSKirill A. Shutemov 		 * register it here without waiting for a page fault that
368b46e756fSKirill A. Shutemov 		 * may not happen any time soon.
369b46e756fSKirill A. Shutemov 		 */
370c791576cSYang Shi 		khugepaged_enter_vma(vma, *vm_flags);
371b46e756fSKirill A. Shutemov 		break;
372b46e756fSKirill A. Shutemov 	case MADV_NOHUGEPAGE:
373b46e756fSKirill A. Shutemov 		*vm_flags &= ~VM_HUGEPAGE;
374b46e756fSKirill A. Shutemov 		*vm_flags |= VM_NOHUGEPAGE;
375b46e756fSKirill A. Shutemov 		/*
376b46e756fSKirill A. Shutemov 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
377b46e756fSKirill A. Shutemov 		 * this vma, even if we leave the mm registered in khugepaged
378b46e756fSKirill A. Shutemov 		 * because it got registered before VM_NOHUGEPAGE was set.
379b46e756fSKirill A. Shutemov 		 */
380b46e756fSKirill A. Shutemov 		break;
381b46e756fSKirill A. Shutemov 	}
382b46e756fSKirill A. Shutemov 
383b46e756fSKirill A. Shutemov 	return 0;
384b46e756fSKirill A. Shutemov }
385b46e756fSKirill A. Shutemov 
386b46e756fSKirill A. Shutemov int __init khugepaged_init(void)
387b46e756fSKirill A. Shutemov {
388b46e756fSKirill A. Shutemov 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
389b26e2701SQi Zheng 					  sizeof(struct khugepaged_mm_slot),
390b26e2701SQi Zheng 					  __alignof__(struct khugepaged_mm_slot),
391b26e2701SQi Zheng 					  0, NULL);
392b46e756fSKirill A. Shutemov 	if (!mm_slot_cache)
393b46e756fSKirill A. Shutemov 		return -ENOMEM;
394b46e756fSKirill A. Shutemov 
395b46e756fSKirill A. Shutemov 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
396b46e756fSKirill A. Shutemov 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
397b46e756fSKirill A. Shutemov 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
39871a2c112SKirill A. Shutemov 	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
399b46e756fSKirill A. Shutemov 
400b46e756fSKirill A. Shutemov 	return 0;
401b46e756fSKirill A. Shutemov }
402b46e756fSKirill A. Shutemov 
403b46e756fSKirill A. Shutemov void __init khugepaged_destroy(void)
404b46e756fSKirill A. Shutemov {
405b46e756fSKirill A. Shutemov 	kmem_cache_destroy(mm_slot_cache);
406b46e756fSKirill A. Shutemov }
407b46e756fSKirill A. Shutemov 
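/*
 * True once every user of the mm has gone (mm_users == 0); khugepaged itself
 * only holds an mm_count reference taken via mmgrab() in __khugepaged_enter().
 */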
4087d2c4385SZach O'Keefe static inline int hpage_collapse_test_exit(struct mm_struct *mm)
409b46e756fSKirill A. Shutemov {
4104d45e75aSJann Horn 	return atomic_read(&mm->mm_users) == 0;
411b46e756fSKirill A. Shutemov }
412b46e756fSKirill A. Shutemov 
413879c6000SLance Yang static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
414879c6000SLance Yang {
415879c6000SLance Yang 	return hpage_collapse_test_exit(mm) ||
416879c6000SLance Yang 	       test_bit(MMF_DISABLE_THP, &mm->flags);
417879c6000SLance Yang }
418879c6000SLance Yang 
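/*
 * Register @mm with khugepaged: allocate a slot, add it to the hash and
 * queue it just behind the scan cursor. Pairs with __khugepaged_exit() when
 * the mm goes away.
 */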
419d2081b2bSYang Shi void __khugepaged_enter(struct mm_struct *mm)
420b46e756fSKirill A. Shutemov {
421b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
422b26e2701SQi Zheng 	struct mm_slot *slot;
423b46e756fSKirill A. Shutemov 	int wakeup;
424b46e756fSKirill A. Shutemov 
42516618670SXin Hao 	/* __khugepaged_exit() must not run from under us */
42616618670SXin Hao 	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
42716618670SXin Hao 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
42816618670SXin Hao 		return;
42916618670SXin Hao 
430b26e2701SQi Zheng 	mm_slot = mm_slot_alloc(mm_slot_cache);
431b46e756fSKirill A. Shutemov 	if (!mm_slot)
432d2081b2bSYang Shi 		return;
433b46e756fSKirill A. Shutemov 
434b26e2701SQi Zheng 	slot = &mm_slot->slot;
435b26e2701SQi Zheng 
436b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
437b26e2701SQi Zheng 	mm_slot_insert(mm_slots_hash, mm, slot);
438b46e756fSKirill A. Shutemov 	/*
439b46e756fSKirill A. Shutemov 	 * Insert just behind the scanning cursor, to let the area settle
440b46e756fSKirill A. Shutemov 	 * down a little.
441b46e756fSKirill A. Shutemov 	 */
442b46e756fSKirill A. Shutemov 	wakeup = list_empty(&khugepaged_scan.mm_head);
443b26e2701SQi Zheng 	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
444b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
445b46e756fSKirill A. Shutemov 
446f1f10076SVegard Nossum 	mmgrab(mm);
447b46e756fSKirill A. Shutemov 	if (wakeup)
448b46e756fSKirill A. Shutemov 		wake_up_interruptible(&khugepaged_wait);
449b46e756fSKirill A. Shutemov }
450b46e756fSKirill A. Shutemov 
451c791576cSYang Shi void khugepaged_enter_vma(struct vm_area_struct *vma,
452b46e756fSKirill A. Shutemov 			  unsigned long vm_flags)
453b46e756fSKirill A. Shutemov {
4542647d11bSYang Shi 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
4551064026bSYang Shi 	    hugepage_flags_enabled()) {
4563485b883SRyan Roberts 		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
4573485b883SRyan Roberts 					    PMD_ORDER))
4582647d11bSYang Shi 			__khugepaged_enter(vma->vm_mm);
4592647d11bSYang Shi 	}
460b46e756fSKirill A. Shutemov }
461b46e756fSKirill A. Shutemov 
462b46e756fSKirill A. Shutemov void __khugepaged_exit(struct mm_struct *mm)
463b46e756fSKirill A. Shutemov {
464b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
465b26e2701SQi Zheng 	struct mm_slot *slot;
466b46e756fSKirill A. Shutemov 	int free = 0;
467b46e756fSKirill A. Shutemov 
468b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
469b26e2701SQi Zheng 	slot = mm_slot_lookup(mm_slots_hash, mm);
470b26e2701SQi Zheng 	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
471b46e756fSKirill A. Shutemov 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
472b26e2701SQi Zheng 		hash_del(&slot->hash);
473b26e2701SQi Zheng 		list_del(&slot->mm_node);
474b46e756fSKirill A. Shutemov 		free = 1;
475b46e756fSKirill A. Shutemov 	}
476b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
477b46e756fSKirill A. Shutemov 
478b46e756fSKirill A. Shutemov 	if (free) {
479b46e756fSKirill A. Shutemov 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
480b26e2701SQi Zheng 		mm_slot_free(mm_slot_cache, mm_slot);
481b46e756fSKirill A. Shutemov 		mmdrop(mm);
482b46e756fSKirill A. Shutemov 	} else if (mm_slot) {
483b46e756fSKirill A. Shutemov 		/*
484b46e756fSKirill A. Shutemov 		 * This is required to serialize against
4857d2c4385SZach O'Keefe 		 * hpage_collapse_test_exit() (which is guaranteed to run
4867d2c4385SZach O'Keefe 		 * under mmap_lock read mode). Stop here (all pagetables will be
4877d2c4385SZach O'Keefe 		 * destroyed after we return) until khugepaged has finished
4887d2c4385SZach O'Keefe 		 * working on the pagetables under the mmap_lock.
489b46e756fSKirill A. Shutemov 		 */
490d8ed45c5SMichel Lespinasse 		mmap_write_lock(mm);
491d8ed45c5SMichel Lespinasse 		mmap_write_unlock(mm);
492b46e756fSKirill A. Shutemov 	}
493b46e756fSKirill A. Shutemov }
494b46e756fSKirill A. Shutemov 
49592644f58SVishal Moola (Oracle) static void release_pte_folio(struct folio *folio)
49692644f58SVishal Moola (Oracle) {
49792644f58SVishal Moola (Oracle) 	node_stat_mod_folio(folio,
49892644f58SVishal Moola (Oracle) 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
49992644f58SVishal Moola (Oracle) 			-folio_nr_pages(folio));
50092644f58SVishal Moola (Oracle) 	folio_unlock(folio);
50192644f58SVishal Moola (Oracle) 	folio_putback_lru(folio);
50292644f58SVishal Moola (Oracle) }
50392644f58SVishal Moola (Oracle) 
5045503fbf2SKirill A. Shutemov static void release_pte_pages(pte_t *pte, pte_t *_pte,
5055503fbf2SKirill A. Shutemov 		struct list_head *compound_pagelist)
506b46e756fSKirill A. Shutemov {
5079bdfeea4SVishal Moola (Oracle) 	struct folio *folio, *tmp;
5085503fbf2SKirill A. Shutemov 
509b46e756fSKirill A. Shutemov 	while (--_pte >= pte) {
510c33c7948SRyan Roberts 		pte_t pteval = ptep_get(_pte);
511f528260bSVishal Moola (Oracle) 		unsigned long pfn;
5125503fbf2SKirill A. Shutemov 
513f528260bSVishal Moola (Oracle) 		if (pte_none(pteval))
514f528260bSVishal Moola (Oracle) 			continue;
515f528260bSVishal Moola (Oracle) 		pfn = pte_pfn(pteval);
516f528260bSVishal Moola (Oracle) 		if (is_zero_pfn(pfn))
517f528260bSVishal Moola (Oracle) 			continue;
518f528260bSVishal Moola (Oracle) 		folio = pfn_folio(pfn);
519f528260bSVishal Moola (Oracle) 		if (folio_test_large(folio))
520f528260bSVishal Moola (Oracle) 			continue;
5219bdfeea4SVishal Moola (Oracle) 		release_pte_folio(folio);
5225503fbf2SKirill A. Shutemov 	}
5235503fbf2SKirill A. Shutemov 
5249bdfeea4SVishal Moola (Oracle) 	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
5259bdfeea4SVishal Moola (Oracle) 		list_del(&folio->lru);
5269bdfeea4SVishal Moola (Oracle) 		release_pte_folio(folio);
527b46e756fSKirill A. Shutemov 	}
528b46e756fSKirill A. Shutemov }
529b46e756fSKirill A. Shutemov 
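/*
 * A folio is only safe to collapse if its refcount is fully explained by its
 * page table mappings (plus the swapcache reference, if any); any surplus
 * indicates a GUP or other external pin.
 */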
530dbf85c21SVishal Moola (Oracle) static bool is_refcount_suitable(struct folio *folio)
5319445689fSKirill A. Shutemov {
5329445689fSKirill A. Shutemov 	int expected_refcount;
5339445689fSKirill A. Shutemov 
534dbf85c21SVishal Moola (Oracle) 	expected_refcount = folio_mapcount(folio);
535dbf85c21SVishal Moola (Oracle) 	if (folio_test_swapcache(folio))
536dbf85c21SVishal Moola (Oracle) 		expected_refcount += folio_nr_pages(folio);
5379445689fSKirill A. Shutemov 
538dbf85c21SVishal Moola (Oracle) 	return folio_ref_count(folio) == expected_refcount;
5399445689fSKirill A. Shutemov }
5409445689fSKirill A. Shutemov 
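/*
 * Lock and LRU-isolate every page mapped by the HPAGE_PMD_NR ptes starting
 * at @address, enforcing the limits in @cc. On success the pages remain
 * isolated and locked for __collapse_huge_page_copy(); on failure they are
 * all released via release_pte_pages(). Returns an enum scan_result value.
 */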
541b46e756fSKirill A. Shutemov static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
542b46e756fSKirill A. Shutemov 					unsigned long address,
5435503fbf2SKirill A. Shutemov 					pte_t *pte,
544d8ea7cc8SZach O'Keefe 					struct collapse_control *cc,
5455503fbf2SKirill A. Shutemov 					struct list_head *compound_pagelist)
546b46e756fSKirill A. Shutemov {
547b46e756fSKirill A. Shutemov 	struct page *page = NULL;
5488dd1e896SVishal Moola (Oracle) 	struct folio *folio = NULL;
549b46e756fSKirill A. Shutemov 	pte_t *_pte;
55050ad2f24SZach O'Keefe 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
5510db501f7SEbru Akagunduz 	bool writable = false;
552b46e756fSKirill A. Shutemov 
553b46e756fSKirill A. Shutemov 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
554b46e756fSKirill A. Shutemov 	     _pte++, address += PAGE_SIZE) {
555c33c7948SRyan Roberts 		pte_t pteval = ptep_get(_pte);
556b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || (pte_present(pteval) &&
557b46e756fSKirill A. Shutemov 				is_zero_pfn(pte_pfn(pteval)))) {
558d8ea7cc8SZach O'Keefe 			++none_or_zero;
559b46e756fSKirill A. Shutemov 			if (!userfaultfd_armed(vma) &&
560d8ea7cc8SZach O'Keefe 			    (!cc->is_khugepaged ||
561d8ea7cc8SZach O'Keefe 			     none_or_zero <= khugepaged_max_ptes_none)) {
562b46e756fSKirill A. Shutemov 				continue;
563b46e756fSKirill A. Shutemov 			} else {
564b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_NONE_PTE;
565e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
566b46e756fSKirill A. Shutemov 				goto out;
567b46e756fSKirill A. Shutemov 			}
568b46e756fSKirill A. Shutemov 		}
569b46e756fSKirill A. Shutemov 		if (!pte_present(pteval)) {
570b46e756fSKirill A. Shutemov 			result = SCAN_PTE_NON_PRESENT;
571b46e756fSKirill A. Shutemov 			goto out;
572b46e756fSKirill A. Shutemov 		}
573dd47ac42SPeter Xu 		if (pte_uffd_wp(pteval)) {
574dd47ac42SPeter Xu 			result = SCAN_PTE_UFFD_WP;
575dd47ac42SPeter Xu 			goto out;
576dd47ac42SPeter Xu 		}
577b46e756fSKirill A. Shutemov 		page = vm_normal_page(vma, address, pteval);
5783218f871SAlex Sierra 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
579b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_NULL;
580b46e756fSKirill A. Shutemov 			goto out;
581b46e756fSKirill A. Shutemov 		}
582b46e756fSKirill A. Shutemov 
5838dd1e896SVishal Moola (Oracle) 		folio = page_folio(page);
5848dd1e896SVishal Moola (Oracle) 		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
585b46e756fSKirill A. Shutemov 
586d8ea7cc8SZach O'Keefe 		if (page_mapcount(page) > 1) {
587d8ea7cc8SZach O'Keefe 			++shared;
588d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
589d8ea7cc8SZach O'Keefe 			    shared > khugepaged_max_ptes_shared) {
59071a2c112SKirill A. Shutemov 				result = SCAN_EXCEED_SHARED_PTE;
591e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
59271a2c112SKirill A. Shutemov 				goto out;
59371a2c112SKirill A. Shutemov 			}
594d8ea7cc8SZach O'Keefe 		}
59571a2c112SKirill A. Shutemov 
5968dd1e896SVishal Moola (Oracle) 		if (folio_test_large(folio)) {
5978dd1e896SVishal Moola (Oracle) 			struct folio *f;
5985503fbf2SKirill A. Shutemov 
5995503fbf2SKirill A. Shutemov 			/*
6005503fbf2SKirill A. Shutemov 			 * Check if we have dealt with the compound page
6015503fbf2SKirill A. Shutemov 			 * already
6025503fbf2SKirill A. Shutemov 			 */
6038dd1e896SVishal Moola (Oracle) 			list_for_each_entry(f, compound_pagelist, lru) {
6048dd1e896SVishal Moola (Oracle) 				if (folio == f)
6055503fbf2SKirill A. Shutemov 					goto next;
6065503fbf2SKirill A. Shutemov 			}
6075503fbf2SKirill A. Shutemov 		}
6085503fbf2SKirill A. Shutemov 
609b46e756fSKirill A. Shutemov 		/*
610b46e756fSKirill A. Shutemov 		 * We can do it before isolate_lru_page because the
611b46e756fSKirill A. Shutemov 		 * page can't be freed from under us. NOTE: PG_lock
612b46e756fSKirill A. Shutemov 		 * is needed to serialize against split_huge_page
613b46e756fSKirill A. Shutemov 		 * when invoked from the VM.
614b46e756fSKirill A. Shutemov 		 */
6158dd1e896SVishal Moola (Oracle) 		if (!folio_trylock(folio)) {
616b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LOCK;
617b46e756fSKirill A. Shutemov 			goto out;
618b46e756fSKirill A. Shutemov 		}
619b46e756fSKirill A. Shutemov 
620b46e756fSKirill A. Shutemov 		/*
6219445689fSKirill A. Shutemov 		 * Check if the page has any GUP (or other external) pins.
6229445689fSKirill A. Shutemov 		 *
6239445689fSKirill A. Shutemov 		 * The page table that maps the page has been already unlinked
6249445689fSKirill A. Shutemov 		 * from the page table tree and this process cannot get
625f0953a1bSIngo Molnar 		 * an additional pin on the page.
6269445689fSKirill A. Shutemov 		 *
6279445689fSKirill A. Shutemov 		 * New pins can come later if the page is shared across fork,
6289445689fSKirill A. Shutemov 		 * but not from this process. The other process cannot write to
6299445689fSKirill A. Shutemov 		 * the page, only trigger CoW.
630b46e756fSKirill A. Shutemov 		 */
631dbf85c21SVishal Moola (Oracle) 		if (!is_refcount_suitable(folio)) {
6328dd1e896SVishal Moola (Oracle) 			folio_unlock(folio);
633b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
634b46e756fSKirill A. Shutemov 			goto out;
635b46e756fSKirill A. Shutemov 		}
636b46e756fSKirill A. Shutemov 
637b46e756fSKirill A. Shutemov 		/*
638b46e756fSKirill A. Shutemov 		 * Isolate the page to avoid collapsing a hugepage
639b46e756fSKirill A. Shutemov 		 * currently in use by the VM.
640b46e756fSKirill A. Shutemov 		 */
6418dd1e896SVishal Moola (Oracle) 		if (!folio_isolate_lru(folio)) {
6428dd1e896SVishal Moola (Oracle) 			folio_unlock(folio);
643b46e756fSKirill A. Shutemov 			result = SCAN_DEL_PAGE_LRU;
644b46e756fSKirill A. Shutemov 			goto out;
645b46e756fSKirill A. Shutemov 		}
6468dd1e896SVishal Moola (Oracle) 		node_stat_mod_folio(folio,
6478dd1e896SVishal Moola (Oracle) 				NR_ISOLATED_ANON + folio_is_file_lru(folio),
6488dd1e896SVishal Moola (Oracle) 				folio_nr_pages(folio));
6498dd1e896SVishal Moola (Oracle) 		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
6508dd1e896SVishal Moola (Oracle) 		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
651b46e756fSKirill A. Shutemov 
6528dd1e896SVishal Moola (Oracle) 		if (folio_test_large(folio))
6538dd1e896SVishal Moola (Oracle) 			list_add_tail(&folio->lru, compound_pagelist);
6545503fbf2SKirill A. Shutemov next:
655d8ea7cc8SZach O'Keefe 		/*
656d8ea7cc8SZach O'Keefe 		 * If collapse was initiated by khugepaged, check that there is
657d8ea7cc8SZach O'Keefe 		 * enough young ptes to justify collapsing the page
658d8ea7cc8SZach O'Keefe 		 */
659d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
6608dd1e896SVishal Moola (Oracle) 		    (pte_young(pteval) || folio_test_young(folio) ||
6618dd1e896SVishal Moola (Oracle) 		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
662d8ea7cc8SZach O'Keefe 								     address)))
6630db501f7SEbru Akagunduz 			referenced++;
6645503fbf2SKirill A. Shutemov 
6655503fbf2SKirill A. Shutemov 		if (pte_write(pteval))
6665503fbf2SKirill A. Shutemov 			writable = true;
667b46e756fSKirill A. Shutemov 	}
66874e579bfSMiaohe Lin 
66974e579bfSMiaohe Lin 	if (unlikely(!writable)) {
67074e579bfSMiaohe Lin 		result = SCAN_PAGE_RO;
671d8ea7cc8SZach O'Keefe 	} else if (unlikely(cc->is_khugepaged && !referenced)) {
67274e579bfSMiaohe Lin 		result = SCAN_LACK_REFERENCED_PAGE;
67374e579bfSMiaohe Lin 	} else {
674b46e756fSKirill A. Shutemov 		result = SCAN_SUCCEED;
6758dd1e896SVishal Moola (Oracle) 		trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
676b46e756fSKirill A. Shutemov 						    referenced, writable, result);
67750ad2f24SZach O'Keefe 		return result;
678b46e756fSKirill A. Shutemov 	}
679b46e756fSKirill A. Shutemov out:
6805503fbf2SKirill A. Shutemov 	release_pte_pages(pte, _pte, compound_pagelist);
6818dd1e896SVishal Moola (Oracle) 	trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
682b46e756fSKirill A. Shutemov 					    referenced, writable, result);
68350ad2f24SZach O'Keefe 	return result;
684b46e756fSKirill A. Shutemov }
685b46e756fSKirill A. Shutemov 
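/*
 * Called once every pte has been copied into the new hugepage: clear the old
 * ptes, drop rmap on the small pages and free them, and release the
 * still-isolated compound folios on @compound_pagelist.
 */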
68698c76c9fSJiaqi Yan static void __collapse_huge_page_copy_succeeded(pte_t *pte,
687b46e756fSKirill A. Shutemov 						struct vm_area_struct *vma,
688b46e756fSKirill A. Shutemov 						unsigned long address,
6895503fbf2SKirill A. Shutemov 						spinlock_t *ptl,
6905503fbf2SKirill A. Shutemov 						struct list_head *compound_pagelist)
691b46e756fSKirill A. Shutemov {
692d4111eecSMatthew Wilcox (Oracle) 	struct folio *src, *tmp;
693b46e756fSKirill A. Shutemov 	pte_t *_pte;
69498c76c9fSJiaqi Yan 	pte_t pteval;
695b46e756fSKirill A. Shutemov 
69698c76c9fSJiaqi Yan 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
69798c76c9fSJiaqi Yan 	     _pte++, address += PAGE_SIZE) {
698c33c7948SRyan Roberts 		pteval = ptep_get(_pte);
699b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
700b46e756fSKirill A. Shutemov 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
701b46e756fSKirill A. Shutemov 			if (is_zero_pfn(pte_pfn(pteval))) {
702b46e756fSKirill A. Shutemov 				/*
703b46e756fSKirill A. Shutemov 				 * ptl mostly unnecessary.
704b46e756fSKirill A. Shutemov 				 */
705b46e756fSKirill A. Shutemov 				spin_lock(ptl);
70608d5b29eSPasha Tatashin 				ptep_clear(vma->vm_mm, address, _pte);
707b46e756fSKirill A. Shutemov 				spin_unlock(ptl);
7086080d19fSxu xin 				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
709b46e756fSKirill A. Shutemov 			}
710b46e756fSKirill A. Shutemov 		} else {
711d4111eecSMatthew Wilcox (Oracle) 			struct page *src_page = pte_page(pteval);
712d4111eecSMatthew Wilcox (Oracle) 
713d4111eecSMatthew Wilcox (Oracle) 			src = page_folio(src_page);
714d4111eecSMatthew Wilcox (Oracle) 			if (!folio_test_large(src))
715d4111eecSMatthew Wilcox (Oracle) 				release_pte_folio(src);
716b46e756fSKirill A. Shutemov 			/*
717b46e756fSKirill A. Shutemov 			 * ptl mostly unnecessary, but preempt has to
718b46e756fSKirill A. Shutemov 			 * be disabled to update the per-cpu stats
71935668a43SDavid Hildenbrand 			 * inside folio_remove_rmap_pte().
720b46e756fSKirill A. Shutemov 			 */
721b46e756fSKirill A. Shutemov 			spin_lock(ptl);
72208d5b29eSPasha Tatashin 			ptep_clear(vma->vm_mm, address, _pte);
723d4111eecSMatthew Wilcox (Oracle) 			folio_remove_rmap_pte(src, src_page, vma);
724b46e756fSKirill A. Shutemov 			spin_unlock(ptl);
725b46e756fSKirill A. Shutemov 			free_page_and_swap_cache(src_page);
726b46e756fSKirill A. Shutemov 		}
727b46e756fSKirill A. Shutemov 	}
7285503fbf2SKirill A. Shutemov 
729d4111eecSMatthew Wilcox (Oracle) 	list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {
730d4111eecSMatthew Wilcox (Oracle) 		list_del(&src->lru);
731d4111eecSMatthew Wilcox (Oracle) 		node_stat_sub_folio(src, NR_ISOLATED_ANON +
732d4111eecSMatthew Wilcox (Oracle) 				folio_is_file_lru(src));
733d4111eecSMatthew Wilcox (Oracle) 		folio_unlock(src);
734*63b77499SMatthew Wilcox (Oracle) 		free_swap_cache(src);
735d4111eecSMatthew Wilcox (Oracle) 		folio_putback_lru(src);
7365503fbf2SKirill A. Shutemov 	}
737b46e756fSKirill A. Shutemov }
738b46e756fSKirill A. Shutemov 
73998c76c9fSJiaqi Yan static void __collapse_huge_page_copy_failed(pte_t *pte,
74098c76c9fSJiaqi Yan 					     pmd_t *pmd,
74198c76c9fSJiaqi Yan 					     pmd_t orig_pmd,
74298c76c9fSJiaqi Yan 					     struct vm_area_struct *vma,
74398c76c9fSJiaqi Yan 					     struct list_head *compound_pagelist)
74498c76c9fSJiaqi Yan {
74598c76c9fSJiaqi Yan 	spinlock_t *pmd_ptl;
74698c76c9fSJiaqi Yan 
74798c76c9fSJiaqi Yan 	/*
74898c76c9fSJiaqi Yan 	 * Re-establish the PMD to point to the original page table
74998c76c9fSJiaqi Yan 	 * entry. Restoring PMD needs to be done prior to releasing
75098c76c9fSJiaqi Yan 	 * pages. Since pages are still isolated and locked here,
75198c76c9fSJiaqi Yan 	 * acquiring anon_vma_lock_write is unnecessary.
75298c76c9fSJiaqi Yan 	 */
75398c76c9fSJiaqi Yan 	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
75498c76c9fSJiaqi Yan 	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
75598c76c9fSJiaqi Yan 	spin_unlock(pmd_ptl);
75698c76c9fSJiaqi Yan 	/*
75798c76c9fSJiaqi Yan 	 * Release both raw and compound pages isolated
75898c76c9fSJiaqi Yan 	 * in __collapse_huge_page_isolate.
75998c76c9fSJiaqi Yan 	 */
76098c76c9fSJiaqi Yan 	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
76198c76c9fSJiaqi Yan }
76298c76c9fSJiaqi Yan 
76398c76c9fSJiaqi Yan /*
76498c76c9fSJiaqi Yan  * __collapse_huge_page_copy - attempts to copy memory contents from raw
76598c76c9fSJiaqi Yan  * pages to a hugepage. Cleans up the raw pages if copying succeeds;
76698c76c9fSJiaqi Yan  * otherwise restores the original page table and releases isolated raw pages.
76798c76c9fSJiaqi Yan  * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
76898c76c9fSJiaqi Yan  *
76998c76c9fSJiaqi Yan  * @pte: starting of the PTEs to copy from
77098c76c9fSJiaqi Yan  * @page: the new hugepage to copy contents to
77198c76c9fSJiaqi Yan  * @pmd: pointer to the new hugepage's PMD
77298c76c9fSJiaqi Yan  * @orig_pmd: the original raw pages' PMD
77398c76c9fSJiaqi Yan  * @vma: the original raw pages' virtual memory area
77498c76c9fSJiaqi Yan  * @address: starting address to copy
77598c76c9fSJiaqi Yan  * @ptl: lock on raw pages' PTEs
77698c76c9fSJiaqi Yan  * @compound_pagelist: list that stores compound pages
77798c76c9fSJiaqi Yan  */
77898c76c9fSJiaqi Yan static int __collapse_huge_page_copy(pte_t *pte,
77998c76c9fSJiaqi Yan 				     struct page *page,
78098c76c9fSJiaqi Yan 				     pmd_t *pmd,
78198c76c9fSJiaqi Yan 				     pmd_t orig_pmd,
78298c76c9fSJiaqi Yan 				     struct vm_area_struct *vma,
78398c76c9fSJiaqi Yan 				     unsigned long address,
78498c76c9fSJiaqi Yan 				     spinlock_t *ptl,
78598c76c9fSJiaqi Yan 				     struct list_head *compound_pagelist)
78698c76c9fSJiaqi Yan {
78798c76c9fSJiaqi Yan 	struct page *src_page;
78898c76c9fSJiaqi Yan 	pte_t *_pte;
78998c76c9fSJiaqi Yan 	pte_t pteval;
79098c76c9fSJiaqi Yan 	unsigned long _address;
79198c76c9fSJiaqi Yan 	int result = SCAN_SUCCEED;
79298c76c9fSJiaqi Yan 
79398c76c9fSJiaqi Yan 	/*
79498c76c9fSJiaqi Yan 	 * Copying pages' contents is subject to memory poison at any iteration.
79598c76c9fSJiaqi Yan 	 */
79698c76c9fSJiaqi Yan 	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
79798c76c9fSJiaqi Yan 	     _pte++, page++, _address += PAGE_SIZE) {
798c33c7948SRyan Roberts 		pteval = ptep_get(_pte);
79998c76c9fSJiaqi Yan 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
80098c76c9fSJiaqi Yan 			clear_user_highpage(page, _address);
80198c76c9fSJiaqi Yan 			continue;
80298c76c9fSJiaqi Yan 		}
80398c76c9fSJiaqi Yan 		src_page = pte_page(pteval);
80498c76c9fSJiaqi Yan 		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
80598c76c9fSJiaqi Yan 			result = SCAN_COPY_MC;
80698c76c9fSJiaqi Yan 			break;
80798c76c9fSJiaqi Yan 		}
80898c76c9fSJiaqi Yan 	}
80998c76c9fSJiaqi Yan 
81098c76c9fSJiaqi Yan 	if (likely(result == SCAN_SUCCEED))
81198c76c9fSJiaqi Yan 		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
81298c76c9fSJiaqi Yan 						    compound_pagelist);
81398c76c9fSJiaqi Yan 	else
81498c76c9fSJiaqi Yan 		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
81598c76c9fSJiaqi Yan 						 compound_pagelist);
81698c76c9fSJiaqi Yan 
81798c76c9fSJiaqi Yan 	return result;
81898c76c9fSJiaqi Yan }
81998c76c9fSJiaqi Yan 
820b46e756fSKirill A. Shutemov static void khugepaged_alloc_sleep(void)
821b46e756fSKirill A. Shutemov {
822b46e756fSKirill A. Shutemov 	DEFINE_WAIT(wait);
823b46e756fSKirill A. Shutemov 
824b46e756fSKirill A. Shutemov 	add_wait_queue(&khugepaged_wait, &wait);
825f5d39b02SPeter Zijlstra 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
826f5d39b02SPeter Zijlstra 	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
827b46e756fSKirill A. Shutemov 	remove_wait_queue(&khugepaged_wait, &wait);
828b46e756fSKirill A. Shutemov }
829b46e756fSKirill A. Shutemov 
83034d6b470SZach O'Keefe struct collapse_control khugepaged_collapse_control = {
831d8ea7cc8SZach O'Keefe 	.is_khugepaged = true,
83234d6b470SZach O'Keefe };
833b46e756fSKirill A. Shutemov 
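/*
 * With node reclaim enabled, abort the scan rather than collapse pages
 * spread across NUMA nodes further apart than node_reclaim_distance, so the
 * resulting hugepage is not remote from part of its data.
 */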
8347d2c4385SZach O'Keefe static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
835b46e756fSKirill A. Shutemov {
836b46e756fSKirill A. Shutemov 	int i;
837b46e756fSKirill A. Shutemov 
838b46e756fSKirill A. Shutemov 	/*
839a5f5f91dSMel Gorman 	 * If node_reclaim_mode is disabled, then no extra effort is made to
840b46e756fSKirill A. Shutemov 	 * allocate memory locally.
841b46e756fSKirill A. Shutemov 	 */
842202e35dbSDave Hansen 	if (!node_reclaim_enabled())
843b46e756fSKirill A. Shutemov 		return false;
844b46e756fSKirill A. Shutemov 
845b46e756fSKirill A. Shutemov 	/* If there is a count for this node already, it must be acceptable */
84634d6b470SZach O'Keefe 	if (cc->node_load[nid])
847b46e756fSKirill A. Shutemov 		return false;
848b46e756fSKirill A. Shutemov 
849b46e756fSKirill A. Shutemov 	for (i = 0; i < MAX_NUMNODES; i++) {
85034d6b470SZach O'Keefe 		if (!cc->node_load[i])
851b46e756fSKirill A. Shutemov 			continue;
852a55c7454SMatt Fleming 		if (node_distance(nid, i) > node_reclaim_distance)
853b46e756fSKirill A. Shutemov 			return true;
854b46e756fSKirill A. Shutemov 	}
855b46e756fSKirill A. Shutemov 	return false;
856b46e756fSKirill A. Shutemov }
857b46e756fSKirill A. Shutemov 
8581064026bSYang Shi #define khugepaged_defrag()					\
8591064026bSYang Shi 	(transparent_hugepage_flags &				\
8601064026bSYang Shi 	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
8611064026bSYang Shi 
862b46e756fSKirill A. Shutemov /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
863b46e756fSKirill A. Shutemov static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
864b46e756fSKirill A. Shutemov {
86525160354SVlastimil Babka 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
866b46e756fSKirill A. Shutemov }
867b46e756fSKirill A. Shutemov 
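/*
 * Pick the node that contributed the most scanned pages as the allocation
 * target; nodes that tie for the maximum are also allowed via
 * cc->alloc_nmask.
 */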
868b46e756fSKirill A. Shutemov #ifdef CONFIG_NUMA
8697d2c4385SZach O'Keefe static int hpage_collapse_find_target_node(struct collapse_control *cc)
870b46e756fSKirill A. Shutemov {
871b46e756fSKirill A. Shutemov 	int nid, target_node = 0, max_value = 0;
872b46e756fSKirill A. Shutemov 
873b46e756fSKirill A. Shutemov 	/* find first node with max normal pages hit */
874b46e756fSKirill A. Shutemov 	for (nid = 0; nid < MAX_NUMNODES; nid++)
87534d6b470SZach O'Keefe 		if (cc->node_load[nid] > max_value) {
87634d6b470SZach O'Keefe 			max_value = cc->node_load[nid];
877b46e756fSKirill A. Shutemov 			target_node = nid;
878b46e756fSKirill A. Shutemov 		}
879b46e756fSKirill A. Shutemov 
880e031ff96SYang Shi 	for_each_online_node(nid) {
881e031ff96SYang Shi 		if (max_value == cc->node_load[nid])
882e031ff96SYang Shi 			node_set(nid, cc->alloc_nmask);
883b46e756fSKirill A. Shutemov 	}
884b46e756fSKirill A. Shutemov 
885b46e756fSKirill A. Shutemov 	return target_node;
886b46e756fSKirill A. Shutemov }
887c6a7f445SYang Shi #else
8887d2c4385SZach O'Keefe static int hpage_collapse_find_target_node(struct collapse_control *cc)
889b46e756fSKirill A. Shutemov {
890c6a7f445SYang Shi 	return 0;
891b46e756fSKirill A. Shutemov }
892c6a7f445SYang Shi #endif
893b46e756fSKirill A. Shutemov 
894b455f39dSVishal Moola (Oracle) static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
895e031ff96SYang Shi 				      nodemask_t *nmask)
896b46e756fSKirill A. Shutemov {
897b455f39dSVishal Moola (Oracle) 	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
898b455f39dSVishal Moola (Oracle) 
899b455f39dSVishal Moola (Oracle) 	if (unlikely(!*folio)) {
900b46e756fSKirill A. Shutemov 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
9019710a78aSZach O'Keefe 		return false;
902b46e756fSKirill A. Shutemov 	}
903b46e756fSKirill A. Shutemov 
904b46e756fSKirill A. Shutemov 	count_vm_event(THP_COLLAPSE_ALLOC);
905b46e756fSKirill A. Shutemov 	return true;
906b46e756fSKirill A. Shutemov }
907b46e756fSKirill A. Shutemov 
908b46e756fSKirill A. Shutemov /*
909c1e8d7c6SMichel Lespinasse  * If mmap_lock temporarily dropped, revalidate vma
910c1e8d7c6SMichel Lespinasse  * before taking mmap_lock.
91150ad2f24SZach O'Keefe  * Returns enum scan_result value.
912b46e756fSKirill A. Shutemov  */
913b46e756fSKirill A. Shutemov 
914c131f751SKirill A. Shutemov static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
91534488399SZach O'Keefe 				   bool expect_anon,
916a7f4e6e4SZach O'Keefe 				   struct vm_area_struct **vmap,
917a7f4e6e4SZach O'Keefe 				   struct collapse_control *cc)
918b46e756fSKirill A. Shutemov {
919b46e756fSKirill A. Shutemov 	struct vm_area_struct *vma;
920b46e756fSKirill A. Shutemov 
9215dad6048SLance Yang 	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
922b46e756fSKirill A. Shutemov 		return SCAN_ANY_PROCESS;
923b46e756fSKirill A. Shutemov 
924c131f751SKirill A. Shutemov 	*vmap = vma = find_vma(mm, address);
925b46e756fSKirill A. Shutemov 	if (!vma)
926b46e756fSKirill A. Shutemov 		return SCAN_VMA_NULL;
927b46e756fSKirill A. Shutemov 
9283485b883SRyan Roberts 	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
929b46e756fSKirill A. Shutemov 		return SCAN_ADDRESS_RANGE;
9303485b883SRyan Roberts 	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
9313485b883SRyan Roberts 				     cc->is_khugepaged, PMD_ORDER))
932b46e756fSKirill A. Shutemov 		return SCAN_VMA_CHECK;
933f707fa49SYang Shi 	/*
934f707fa49SYang Shi 	 * Anon VMA expected, the address may be unmapped then
935f707fa49SYang Shi 	 * remapped to file after khugepaged reacquired the mmap_lock.
936f707fa49SYang Shi 	 *
9373485b883SRyan Roberts 	 * thp_vma_allowable_order may return true for qualified file
938f707fa49SYang Shi 	 * vmas.
939f707fa49SYang Shi 	 */
94034488399SZach O'Keefe 	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
94134488399SZach O'Keefe 		return SCAN_PAGE_ANON;
94250ad2f24SZach O'Keefe 	return SCAN_SUCCEED;
943b46e756fSKirill A. Shutemov }
944b46e756fSKirill A. Shutemov 
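/*
 * Look up the pmd covering @address and classify it: missing or none, an
 * existing huge mapping (SCAN_PMD_MAPPED), unusable (devmap or bad), or a
 * normal page table that can be collapsed (SCAN_SUCCEED).
 */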
94550722804SZach O'Keefe static int find_pmd_or_thp_or_none(struct mm_struct *mm,
94650722804SZach O'Keefe 				   unsigned long address,
94750722804SZach O'Keefe 				   pmd_t **pmd)
94850722804SZach O'Keefe {
94950722804SZach O'Keefe 	pmd_t pmde;
95050722804SZach O'Keefe 
95150722804SZach O'Keefe 	*pmd = mm_find_pmd(mm, address);
95250722804SZach O'Keefe 	if (!*pmd)
95350722804SZach O'Keefe 		return SCAN_PMD_NULL;
95450722804SZach O'Keefe 
955dab6e717SPeter Zijlstra 	pmde = pmdp_get_lockless(*pmd);
95634488399SZach O'Keefe 	if (pmd_none(pmde))
95734488399SZach O'Keefe 		return SCAN_PMD_NONE;
958edb5d0cfSZach O'Keefe 	if (!pmd_present(pmde))
959edb5d0cfSZach O'Keefe 		return SCAN_PMD_NULL;
96050722804SZach O'Keefe 	if (pmd_trans_huge(pmde))
96150722804SZach O'Keefe 		return SCAN_PMD_MAPPED;
962edb5d0cfSZach O'Keefe 	if (pmd_devmap(pmde))
963edb5d0cfSZach O'Keefe 		return SCAN_PMD_NULL;
96450722804SZach O'Keefe 	if (pmd_bad(pmde))
96550722804SZach O'Keefe 		return SCAN_PMD_NULL;
96650722804SZach O'Keefe 	return SCAN_SUCCEED;
96750722804SZach O'Keefe }
96850722804SZach O'Keefe 
96950722804SZach O'Keefe static int check_pmd_still_valid(struct mm_struct *mm,
97050722804SZach O'Keefe 				 unsigned long address,
97150722804SZach O'Keefe 				 pmd_t *pmd)
97250722804SZach O'Keefe {
97350722804SZach O'Keefe 	pmd_t *new_pmd;
97450722804SZach O'Keefe 	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
97550722804SZach O'Keefe 
97650722804SZach O'Keefe 	if (result != SCAN_SUCCEED)
97750722804SZach O'Keefe 		return result;
97850722804SZach O'Keefe 	if (new_pmd != pmd)
97950722804SZach O'Keefe 		return SCAN_FAIL;
98050722804SZach O'Keefe 	return SCAN_SUCCEED;
981b46e756fSKirill A. Shutemov }
982b46e756fSKirill A. Shutemov 
983b46e756fSKirill A. Shutemov /*
984b46e756fSKirill A. Shutemov  * Bring missing pages in from swap, to complete THP collapse.
9857d2c4385SZach O'Keefe  * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
986b46e756fSKirill A. Shutemov  *
9874d928e20SMiaohe Lin  * Called and returns without pte mapped or spinlocks held.
988895f5ee4SHugh Dickins  * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
989b46e756fSKirill A. Shutemov  */
99050ad2f24SZach O'Keefe static int __collapse_huge_page_swapin(struct mm_struct *mm,
991b46e756fSKirill A. Shutemov 				       struct vm_area_struct *vma,
9922b635dd3SWill Deacon 				       unsigned long haddr, pmd_t *pmd,
9930db501f7SEbru Akagunduz 				       int referenced)
994b46e756fSKirill A. Shutemov {
9952b740303SSouptick Joarder 	int swapped_in = 0;
9962b740303SSouptick Joarder 	vm_fault_t ret = 0;
9972b635dd3SWill Deacon 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
998895f5ee4SHugh Dickins 	int result;
999895f5ee4SHugh Dickins 	pte_t *pte = NULL;
1000c7ad0880SHugh Dickins 	spinlock_t *ptl;
10012b635dd3SWill Deacon 
10022b635dd3SWill Deacon 	for (address = haddr; address < end; address += PAGE_SIZE) {
100382b0f8c3SJan Kara 		struct vm_fault vmf = {
1004b46e756fSKirill A. Shutemov 			.vma = vma,
1005b46e756fSKirill A. Shutemov 			.address = address,
1006895f5ee4SHugh Dickins 			.pgoff = linear_page_index(vma, address),
1007b46e756fSKirill A. Shutemov 			.flags = FAULT_FLAG_ALLOW_RETRY,
1008b46e756fSKirill A. Shutemov 			.pmd = pmd,
1009b46e756fSKirill A. Shutemov 		};
1010b46e756fSKirill A. Shutemov 
1011895f5ee4SHugh Dickins 		if (!pte++) {
1012c7ad0880SHugh Dickins 			pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
1013895f5ee4SHugh Dickins 			if (!pte) {
1014895f5ee4SHugh Dickins 				mmap_read_unlock(mm);
1015895f5ee4SHugh Dickins 				result = SCAN_PMD_NULL;
1016895f5ee4SHugh Dickins 				goto out;
10172b635dd3SWill Deacon 			}
1018895f5ee4SHugh Dickins 		}
1019895f5ee4SHugh Dickins 
1020c7ad0880SHugh Dickins 		vmf.orig_pte = ptep_get_lockless(pte);
1021895f5ee4SHugh Dickins 		if (!is_swap_pte(vmf.orig_pte))
1022895f5ee4SHugh Dickins 			continue;
1023895f5ee4SHugh Dickins 
1024895f5ee4SHugh Dickins 		vmf.pte = pte;
1025c7ad0880SHugh Dickins 		vmf.ptl = ptl;
10262994302bSJan Kara 		ret = do_swap_page(&vmf);
1027895f5ee4SHugh Dickins 		/* Which unmaps pte (after perhaps re-checking the entry) */
1028895f5ee4SHugh Dickins 		pte = NULL;
10290db501f7SEbru Akagunduz 
10304d928e20SMiaohe Lin 		/*
10314d928e20SMiaohe Lin 		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
10324d928e20SMiaohe Lin 		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
10334d928e20SMiaohe Lin 		 * we do not retry here and swap entry will remain in pagetable
10344d928e20SMiaohe Lin 		 * resulting in later failure.
10354d928e20SMiaohe Lin 		 */
1036b46e756fSKirill A. Shutemov 		if (ret & VM_FAULT_RETRY) {
103750ad2f24SZach O'Keefe 			/* Likely, but not guaranteed, that page lock failed */
1038895f5ee4SHugh Dickins 			result = SCAN_PAGE_LOCK;
1039895f5ee4SHugh Dickins 			goto out;
104047f863eaSEbru Akagunduz 		}
1041b46e756fSKirill A. Shutemov 		if (ret & VM_FAULT_ERROR) {
10424d928e20SMiaohe Lin 			mmap_read_unlock(mm);
1043895f5ee4SHugh Dickins 			result = SCAN_FAIL;
1044895f5ee4SHugh Dickins 			goto out;
1045b46e756fSKirill A. Shutemov 		}
10464d928e20SMiaohe Lin 		swapped_in++;
1047b46e756fSKirill A. Shutemov 	}
1048ae2c5d80SKirill A. Shutemov 
1049895f5ee4SHugh Dickins 	if (pte)
1050895f5ee4SHugh Dickins 		pte_unmap(pte);
1051895f5ee4SHugh Dickins 
10521fec6890SMatthew Wilcox (Oracle) 	/* Drain LRU cache to remove extra pin on the swapped-in pages */
1053ae2c5d80SKirill A. Shutemov 	if (swapped_in)
1054ae2c5d80SKirill A. Shutemov 		lru_add_drain();
1055ae2c5d80SKirill A. Shutemov 
1056895f5ee4SHugh Dickins 	result = SCAN_SUCCEED;
1057895f5ee4SHugh Dickins out:
1058895f5ee4SHugh Dickins 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
1059895f5ee4SHugh Dickins 	return result;
1060b46e756fSKirill A. Shutemov }
1061b46e756fSKirill A. Shutemov 
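/*
 * Allocate the huge page on the preferred NUMA node recorded during the
 * scan (see cc->node_load[]) and charge it to the mm's memcg.  khugepaged
 * uses the gfp mask derived from its defrag sysfs setting, while
 * MADV_COLLAPSE callers use GFP_TRANSHUGE.
 */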
10629710a78aSZach O'Keefe static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
10639710a78aSZach O'Keefe 			      struct collapse_control *cc)
10649710a78aSZach O'Keefe {
10657d8faaf1SZach O'Keefe 	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
1066e031ff96SYang Shi 		     GFP_TRANSHUGE);
10677d2c4385SZach O'Keefe 	int node = hpage_collapse_find_target_node(cc);
106894c02ad7SPeter Xu 	struct folio *folio;
10699710a78aSZach O'Keefe 
1070b455f39dSVishal Moola (Oracle) 	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
1071b455f39dSVishal Moola (Oracle) 		*hpage = NULL;
10729710a78aSZach O'Keefe 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
1073b455f39dSVishal Moola (Oracle) 	}
107494c02ad7SPeter Xu 
107594c02ad7SPeter Xu 	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
107694c02ad7SPeter Xu 		folio_put(folio);
107794c02ad7SPeter Xu 		*hpage = NULL;
10789710a78aSZach O'Keefe 		return SCAN_CGROUP_CHARGE_FAIL;
107994c02ad7SPeter Xu 	}
108094c02ad7SPeter Xu 
1081b455f39dSVishal Moola (Oracle) 	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
1082b455f39dSVishal Moola (Oracle) 
1083b455f39dSVishal Moola (Oracle) 	*hpage = folio_page(folio, 0);
10849710a78aSZach O'Keefe 	return SCAN_SUCCEED;
10859710a78aSZach O'Keefe }
10869710a78aSZach O'Keefe 
108750ad2f24SZach O'Keefe static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
108850ad2f24SZach O'Keefe 			      int referenced, int unmapped,
108950ad2f24SZach O'Keefe 			      struct collapse_control *cc)
1090b46e756fSKirill A. Shutemov {
10915503fbf2SKirill A. Shutemov 	LIST_HEAD(compound_pagelist);
1092b46e756fSKirill A. Shutemov 	pmd_t *pmd, _pmd;
1093b46e756fSKirill A. Shutemov 	pte_t *pte;
1094b46e756fSKirill A. Shutemov 	pgtable_t pgtable;
109554327268SMatthew Wilcox (Oracle) 	struct folio *folio;
109650ad2f24SZach O'Keefe 	struct page *hpage;
1097b46e756fSKirill A. Shutemov 	spinlock_t *pmd_ptl, *pte_ptl;
109850ad2f24SZach O'Keefe 	int result = SCAN_FAIL;
1099c131f751SKirill A. Shutemov 	struct vm_area_struct *vma;
1100ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
1101b46e756fSKirill A. Shutemov 
1102b46e756fSKirill A. Shutemov 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1103b46e756fSKirill A. Shutemov 
1104988ddb71SKirill A. Shutemov 	/*
1105c1e8d7c6SMichel Lespinasse 	 * Before allocating the hugepage, release the mmap_lock read lock.
1106988ddb71SKirill A. Shutemov 	 * The allocation can potentially take a long time if it involves
1107c1e8d7c6SMichel Lespinasse 	 * sync compaction, and we do not need to hold the mmap_lock during
1108988ddb71SKirill A. Shutemov 	 * that. We will recheck the vma after taking it again in write mode.
1109988ddb71SKirill A. Shutemov 	 */
1110d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1111b46e756fSKirill A. Shutemov 
111250ad2f24SZach O'Keefe 	result = alloc_charge_hpage(&hpage, mm, cc);
11139710a78aSZach O'Keefe 	if (result != SCAN_SUCCEED)
1114b46e756fSKirill A. Shutemov 		goto out_nolock;
1115b46e756fSKirill A. Shutemov 
1116d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
111734488399SZach O'Keefe 	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
111850ad2f24SZach O'Keefe 	if (result != SCAN_SUCCEED) {
1119d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1120b46e756fSKirill A. Shutemov 		goto out_nolock;
1121b46e756fSKirill A. Shutemov 	}
1122b46e756fSKirill A. Shutemov 
112350722804SZach O'Keefe 	result = find_pmd_or_thp_or_none(mm, address, &pmd);
112450722804SZach O'Keefe 	if (result != SCAN_SUCCEED) {
1125d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1126b46e756fSKirill A. Shutemov 		goto out_nolock;
1127b46e756fSKirill A. Shutemov 	}
1128b46e756fSKirill A. Shutemov 
112950ad2f24SZach O'Keefe 	if (unmapped) {
1130b46e756fSKirill A. Shutemov 		/*
113150ad2f24SZach O'Keefe 		 * __collapse_huge_page_swapin will return with mmap_lock
113250ad2f24SZach O'Keefe 		 * released when it fails. So we jump to out_nolock directly in
113350ad2f24SZach O'Keefe 		 * that case.  Continuing to collapse causes inconsistency.
1134b46e756fSKirill A. Shutemov 		 */
113550ad2f24SZach O'Keefe 		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
113650ad2f24SZach O'Keefe 						     referenced);
113750ad2f24SZach O'Keefe 		if (result != SCAN_SUCCEED)
1138b46e756fSKirill A. Shutemov 			goto out_nolock;
1139b46e756fSKirill A. Shutemov 	}
1140b46e756fSKirill A. Shutemov 
1141d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1142b46e756fSKirill A. Shutemov 	/*
1143b46e756fSKirill A. Shutemov 	 * Prevent all access to the page tables, with the exception of
1144b46e756fSKirill A. Shutemov 	 * gup_fast (handled later by the ptep_clear_flush) and the VM
1145b46e756fSKirill A. Shutemov 	 * (handled by the anon_vma lock + PG_lock).
1146adef4406SAndrea Arcangeli 	 *
1147adef4406SAndrea Arcangeli 	 * UFFDIO_MOVE is also prevented from racing, thanks to the
1148adef4406SAndrea Arcangeli 	 * mmap_lock.
1149b46e756fSKirill A. Shutemov 	 */
1150d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
115134488399SZach O'Keefe 	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
115250ad2f24SZach O'Keefe 	if (result != SCAN_SUCCEED)
115318d24a7cSMiaohe Lin 		goto out_up_write;
1154b46e756fSKirill A. Shutemov 	/* check if the pmd is still valid */
115550722804SZach O'Keefe 	result = check_pmd_still_valid(mm, address, pmd);
115650722804SZach O'Keefe 	if (result != SCAN_SUCCEED)
115718d24a7cSMiaohe Lin 		goto out_up_write;
1158b46e756fSKirill A. Shutemov 
115955fd6fccSSuren Baghdasaryan 	vma_start_write(vma);
1160b46e756fSKirill A. Shutemov 	anon_vma_lock_write(vma->anon_vma);
1161b46e756fSKirill A. Shutemov 
11627d4a8be0SAlistair Popple 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
11637d4a8be0SAlistair Popple 				address + HPAGE_PMD_SIZE);
1164ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
1165ec649c9dSVille Syrjälä 
1166b46e756fSKirill A. Shutemov 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1167b46e756fSKirill A. Shutemov 	/*
116870cbc3ccSYang Shi 	 * This removes any huge TLB entry from the CPU so we won't allow
116970cbc3ccSYang Shi 	 * huge and small TLB entries for the same virtual address, to
117070cbc3ccSYang Shi 	 * avoid the risk of CPU bugs in that area.
117170cbc3ccSYang Shi 	 *
117270cbc3ccSYang Shi 	 * Parallel fast GUP is fine since fast GUP will back off when
117370cbc3ccSYang Shi 	 * it detects PMD is changed.
1174b46e756fSKirill A. Shutemov 	 */
1175b46e756fSKirill A. Shutemov 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1176b46e756fSKirill A. Shutemov 	spin_unlock(pmd_ptl);
1177ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
11782ba99c5eSJann Horn 	tlb_remove_table_sync_one();
1179b46e756fSKirill A. Shutemov 
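	/*
	 * At this point the huge pmd is cleared and flushed;
	 * tlb_remove_table_sync_one() waits out any GUP-fast walk that may
	 * still be traversing the old page table, so it is safe to isolate
	 * and copy the pages below.
	 */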
1180895f5ee4SHugh Dickins 	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
1181895f5ee4SHugh Dickins 	if (pte) {
1182d8ea7cc8SZach O'Keefe 		result = __collapse_huge_page_isolate(vma, address, pte, cc,
11835503fbf2SKirill A. Shutemov 						      &compound_pagelist);
1184b46e756fSKirill A. Shutemov 		spin_unlock(pte_ptl);
1185895f5ee4SHugh Dickins 	} else {
1186895f5ee4SHugh Dickins 		result = SCAN_PMD_NULL;
1187895f5ee4SHugh Dickins 	}
1188b46e756fSKirill A. Shutemov 
118950ad2f24SZach O'Keefe 	if (unlikely(result != SCAN_SUCCEED)) {
1190895f5ee4SHugh Dickins 		if (pte)
1191b46e756fSKirill A. Shutemov 			pte_unmap(pte);
1192b46e756fSKirill A. Shutemov 		spin_lock(pmd_ptl);
1193b46e756fSKirill A. Shutemov 		BUG_ON(!pmd_none(*pmd));
1194b46e756fSKirill A. Shutemov 		/*
1195b46e756fSKirill A. Shutemov 		 * We can only use set_pmd_at when establishing
1196b46e756fSKirill A. Shutemov 		 * hugepmds and never for establishing regular pmds that
1197b46e756fSKirill A. Shutemov 		 * point to regular page tables. Use pmd_populate for that.
1198b46e756fSKirill A. Shutemov 		 */
1199b46e756fSKirill A. Shutemov 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1200b46e756fSKirill A. Shutemov 		spin_unlock(pmd_ptl);
1201b46e756fSKirill A. Shutemov 		anon_vma_unlock_write(vma->anon_vma);
120218d24a7cSMiaohe Lin 		goto out_up_write;
1203b46e756fSKirill A. Shutemov 	}
1204b46e756fSKirill A. Shutemov 
1205b46e756fSKirill A. Shutemov 	/*
1206b46e756fSKirill A. Shutemov 	 * All pages are isolated and locked so anon_vma rmap
1207b46e756fSKirill A. Shutemov 	 * can't run anymore.
1208b46e756fSKirill A. Shutemov 	 */
1209b46e756fSKirill A. Shutemov 	anon_vma_unlock_write(vma->anon_vma);
1210b46e756fSKirill A. Shutemov 
121198c76c9fSJiaqi Yan 	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
121298c76c9fSJiaqi Yan 					   vma, address, pte_ptl,
12135503fbf2SKirill A. Shutemov 					   &compound_pagelist);
1214b46e756fSKirill A. Shutemov 	pte_unmap(pte);
121598c76c9fSJiaqi Yan 	if (unlikely(result != SCAN_SUCCEED))
121698c76c9fSJiaqi Yan 		goto out_up_write;
121798c76c9fSJiaqi Yan 
121854327268SMatthew Wilcox (Oracle) 	folio = page_folio(hpage);
1219588d01f9SMiaohe Lin 	/*
122054327268SMatthew Wilcox (Oracle) 	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
122154327268SMatthew Wilcox (Oracle) 	 * copy_huge_page writes become visible before the set_pmd_at()
122254327268SMatthew Wilcox (Oracle) 	 * write.
1223588d01f9SMiaohe Lin 	 */
122454327268SMatthew Wilcox (Oracle) 	__folio_mark_uptodate(folio);
1225b46e756fSKirill A. Shutemov 	pgtable = pmd_pgtable(_pmd);
1226b46e756fSKirill A. Shutemov 
122750ad2f24SZach O'Keefe 	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1228f55e1014SLinus Torvalds 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1229b46e756fSKirill A. Shutemov 
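	/*
	 * Publish the collapsed page: add the new folio to the anon rmap and
	 * LRU, deposit the old (now empty) page table so a later
	 * split_huge_pmd() has one to withdraw, and install the huge pmd.
	 */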
1230b46e756fSKirill A. Shutemov 	spin_lock(pmd_ptl);
1231b46e756fSKirill A. Shutemov 	BUG_ON(!pmd_none(*pmd));
123254327268SMatthew Wilcox (Oracle) 	folio_add_new_anon_rmap(folio, vma, address);
123354327268SMatthew Wilcox (Oracle) 	folio_add_lru_vma(folio, vma);
1234b46e756fSKirill A. Shutemov 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1235b46e756fSKirill A. Shutemov 	set_pmd_at(mm, address, pmd, _pmd);
1236b46e756fSKirill A. Shutemov 	update_mmu_cache_pmd(vma, address, pmd);
1237b46e756fSKirill A. Shutemov 	spin_unlock(pmd_ptl);
1238b46e756fSKirill A. Shutemov 
123950ad2f24SZach O'Keefe 	hpage = NULL;
1240b46e756fSKirill A. Shutemov 
1241b46e756fSKirill A. Shutemov 	result = SCAN_SUCCEED;
1242b46e756fSKirill A. Shutemov out_up_write:
1243d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1244b46e756fSKirill A. Shutemov out_nolock:
12457cb1d7efSPeter Xu 	if (hpage)
124650ad2f24SZach O'Keefe 		put_page(hpage);
124750ad2f24SZach O'Keefe 	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
124850ad2f24SZach O'Keefe 	return result;
1249b46e756fSKirill A. Shutemov }
1250b46e756fSKirill A. Shutemov 
12517d2c4385SZach O'Keefe static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1252b46e756fSKirill A. Shutemov 				   struct vm_area_struct *vma,
125350ad2f24SZach O'Keefe 				   unsigned long address, bool *mmap_locked,
125434d6b470SZach O'Keefe 				   struct collapse_control *cc)
1255b46e756fSKirill A. Shutemov {
1256b46e756fSKirill A. Shutemov 	pmd_t *pmd;
1257b46e756fSKirill A. Shutemov 	pte_t *pte, *_pte;
125850ad2f24SZach O'Keefe 	int result = SCAN_FAIL, referenced = 0;
125971a2c112SKirill A. Shutemov 	int none_or_zero = 0, shared = 0;
1260b46e756fSKirill A. Shutemov 	struct page *page = NULL;
12615c07ebb3SVishal Moola (Oracle) 	struct folio *folio = NULL;
1262b46e756fSKirill A. Shutemov 	unsigned long _address;
1263b46e756fSKirill A. Shutemov 	spinlock_t *ptl;
1264b46e756fSKirill A. Shutemov 	int node = NUMA_NO_NODE, unmapped = 0;
12650db501f7SEbru Akagunduz 	bool writable = false;
1266b46e756fSKirill A. Shutemov 
1267b46e756fSKirill A. Shutemov 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1268b46e756fSKirill A. Shutemov 
126950722804SZach O'Keefe 	result = find_pmd_or_thp_or_none(mm, address, &pmd);
127050722804SZach O'Keefe 	if (result != SCAN_SUCCEED)
1271b46e756fSKirill A. Shutemov 		goto out;
1272b46e756fSKirill A. Shutemov 
127334d6b470SZach O'Keefe 	memset(cc->node_load, 0, sizeof(cc->node_load));
1274e031ff96SYang Shi 	nodes_clear(cc->alloc_nmask);
1275b46e756fSKirill A. Shutemov 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1276895f5ee4SHugh Dickins 	if (!pte) {
1277895f5ee4SHugh Dickins 		result = SCAN_PMD_NULL;
1278895f5ee4SHugh Dickins 		goto out;
1279895f5ee4SHugh Dickins 	}
1280895f5ee4SHugh Dickins 
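	/*
	 * Scan each PTE in the range under the lock and decide whether the
	 * range is worth collapsing: count swap, none/zero and shared
	 * entries against the max_ptes_swap, max_ptes_none and
	 * max_ptes_shared limits (enforced for khugepaged; exposed under
	 * /sys/kernel/mm/transparent_hugepage/khugepaged/), refuse uffd-wp
	 * armed entries, record the NUMA node of each page, and require
	 * pages to be anon, on the LRU, unlocked and without extra
	 * references.
	 */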
1281b46e756fSKirill A. Shutemov 	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1282b46e756fSKirill A. Shutemov 	     _pte++, _address += PAGE_SIZE) {
1283c33c7948SRyan Roberts 		pte_t pteval = ptep_get(_pte);
1284b46e756fSKirill A. Shutemov 		if (is_swap_pte(pteval)) {
1285d8ea7cc8SZach O'Keefe 			++unmapped;
1286d8ea7cc8SZach O'Keefe 			if (!cc->is_khugepaged ||
1287d8ea7cc8SZach O'Keefe 			    unmapped <= khugepaged_max_ptes_swap) {
1288e1e267c7SPeter Xu 				/*
1289e1e267c7SPeter Xu 				 * Always be strict with uffd-wp
1290e1e267c7SPeter Xu 				 * enabled swap entries.  Please see
1291e1e267c7SPeter Xu 				 * comment below for pte_uffd_wp().
1292e1e267c7SPeter Xu 				 */
12932bad466cSPeter Xu 				if (pte_swp_uffd_wp_any(pteval)) {
1294e1e267c7SPeter Xu 					result = SCAN_PTE_UFFD_WP;
1295e1e267c7SPeter Xu 					goto out_unmap;
1296e1e267c7SPeter Xu 				}
1297b46e756fSKirill A. Shutemov 				continue;
1298b46e756fSKirill A. Shutemov 			} else {
1299b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_SWAP_PTE;
1300e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1301b46e756fSKirill A. Shutemov 				goto out_unmap;
1302b46e756fSKirill A. Shutemov 			}
1303b46e756fSKirill A. Shutemov 		}
1304b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1305d8ea7cc8SZach O'Keefe 			++none_or_zero;
1306b46e756fSKirill A. Shutemov 			if (!userfaultfd_armed(vma) &&
1307d8ea7cc8SZach O'Keefe 			    (!cc->is_khugepaged ||
1308d8ea7cc8SZach O'Keefe 			     none_or_zero <= khugepaged_max_ptes_none)) {
1309b46e756fSKirill A. Shutemov 				continue;
1310b46e756fSKirill A. Shutemov 			} else {
1311b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_NONE_PTE;
1312e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1313b46e756fSKirill A. Shutemov 				goto out_unmap;
1314b46e756fSKirill A. Shutemov 			}
1315b46e756fSKirill A. Shutemov 		}
1316e1e267c7SPeter Xu 		if (pte_uffd_wp(pteval)) {
1317e1e267c7SPeter Xu 			/*
1318e1e267c7SPeter Xu 			 * Don't collapse the page if any of the small
1319e1e267c7SPeter Xu 			 * PTEs are armed with uffd write protection.
1320e1e267c7SPeter Xu 			 * Here we could also mark the new huge pmd as
1321e1e267c7SPeter Xu 			 * write-protected if any of the small ones is
13228958b249SHaitao Shi 			 * marked, but that could bring unknown
1323e1e267c7SPeter Xu 			 * userfault messages that fall outside of
1324e1e267c7SPeter Xu 			 * the registered range.  So, just be simple.
1325e1e267c7SPeter Xu 			 */
1326e1e267c7SPeter Xu 			result = SCAN_PTE_UFFD_WP;
1327e1e267c7SPeter Xu 			goto out_unmap;
1328e1e267c7SPeter Xu 		}
1329b46e756fSKirill A. Shutemov 		if (pte_write(pteval))
1330b46e756fSKirill A. Shutemov 			writable = true;
1331b46e756fSKirill A. Shutemov 
1332b46e756fSKirill A. Shutemov 		page = vm_normal_page(vma, _address, pteval);
13333218f871SAlex Sierra 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1334b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_NULL;
1335b46e756fSKirill A. Shutemov 			goto out_unmap;
1336b46e756fSKirill A. Shutemov 		}
1337b46e756fSKirill A. Shutemov 
1338d8ea7cc8SZach O'Keefe 		if (page_mapcount(page) > 1) {
1339d8ea7cc8SZach O'Keefe 			++shared;
1340d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
1341d8ea7cc8SZach O'Keefe 			    shared > khugepaged_max_ptes_shared) {
134271a2c112SKirill A. Shutemov 				result = SCAN_EXCEED_SHARED_PTE;
1343e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
134471a2c112SKirill A. Shutemov 				goto out_unmap;
134571a2c112SKirill A. Shutemov 			}
1346d8ea7cc8SZach O'Keefe 		}
134771a2c112SKirill A. Shutemov 
13485c07ebb3SVishal Moola (Oracle) 		folio = page_folio(page);
1349b46e756fSKirill A. Shutemov 		/*
1350b46e756fSKirill A. Shutemov 		 * Record which node the original page is from and save this
135134d6b470SZach O'Keefe 		 * information to cc->node_load[].
13520b8f0d87SQuanfa Fu 		 * Khugepaged will allocate the hugepage from the node that
1353b46e756fSKirill A. Shutemov 		 * has the max hit record.
1354b46e756fSKirill A. Shutemov 		 */
13555c07ebb3SVishal Moola (Oracle) 		node = folio_nid(folio);
13567d2c4385SZach O'Keefe 		if (hpage_collapse_scan_abort(node, cc)) {
1357b46e756fSKirill A. Shutemov 			result = SCAN_SCAN_ABORT;
1358b46e756fSKirill A. Shutemov 			goto out_unmap;
1359b46e756fSKirill A. Shutemov 		}
136034d6b470SZach O'Keefe 		cc->node_load[node]++;
13615c07ebb3SVishal Moola (Oracle) 		if (!folio_test_lru(folio)) {
1362b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LRU;
1363b46e756fSKirill A. Shutemov 			goto out_unmap;
1364b46e756fSKirill A. Shutemov 		}
13655c07ebb3SVishal Moola (Oracle) 		if (folio_test_locked(folio)) {
1366b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LOCK;
1367b46e756fSKirill A. Shutemov 			goto out_unmap;
1368b46e756fSKirill A. Shutemov 		}
13695c07ebb3SVishal Moola (Oracle) 		if (!folio_test_anon(folio)) {
1370b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_ANON;
1371b46e756fSKirill A. Shutemov 			goto out_unmap;
1372b46e756fSKirill A. Shutemov 		}
1373b46e756fSKirill A. Shutemov 
1374b46e756fSKirill A. Shutemov 		/*
13759445689fSKirill A. Shutemov 		 * Check if the page has any GUP (or other external) pins.
13769445689fSKirill A. Shutemov 		 *
1377cb67f428SHugh Dickins 		 * Here the check may be racy:
1378cb67f428SHugh Dickins 		 * it may see total_mapcount > refcount in some cases?
13799445689fSKirill A. Shutemov 		 * But such a case is ephemeral; we can always retry the collapse
13809445689fSKirill A. Shutemov 		 * later.  However, it may report a false positive if the page
13819445689fSKirill A. Shutemov 		 * has excessive GUP pins (i.e. 512).  Anyway, the same check
13829445689fSKirill A. Shutemov 		 * will be done again later and the risk seems low.
1383b46e756fSKirill A. Shutemov 		 */
1384dbf85c21SVishal Moola (Oracle) 		if (!is_refcount_suitable(folio)) {
1385b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
1386b46e756fSKirill A. Shutemov 			goto out_unmap;
1387b46e756fSKirill A. Shutemov 		}
1388d8ea7cc8SZach O'Keefe 
1389d8ea7cc8SZach O'Keefe 		/*
1390d8ea7cc8SZach O'Keefe 		 * If collapse was initiated by khugepaged, check that there is
1391d8ea7cc8SZach O'Keefe 		 * enough young ptes to justify collapsing the page.
1392d8ea7cc8SZach O'Keefe 		 */
1393d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
13945c07ebb3SVishal Moola (Oracle) 		    (pte_young(pteval) || folio_test_young(folio) ||
13955c07ebb3SVishal Moola (Oracle) 		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
1396d8ea7cc8SZach O'Keefe 								     address)))
13970db501f7SEbru Akagunduz 			referenced++;
1398b46e756fSKirill A. Shutemov 	}
1399ffe945e6SKirill A. Shutemov 	if (!writable) {
1400ffe945e6SKirill A. Shutemov 		result = SCAN_PAGE_RO;
1401d8ea7cc8SZach O'Keefe 	} else if (cc->is_khugepaged &&
1402d8ea7cc8SZach O'Keefe 		   (!referenced ||
1403d8ea7cc8SZach O'Keefe 		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1404ffe945e6SKirill A. Shutemov 		result = SCAN_LACK_REFERENCED_PAGE;
1405ffe945e6SKirill A. Shutemov 	} else {
1406b46e756fSKirill A. Shutemov 		result = SCAN_SUCCEED;
1407b46e756fSKirill A. Shutemov 	}
1408b46e756fSKirill A. Shutemov out_unmap:
1409b46e756fSKirill A. Shutemov 	pte_unmap_unlock(pte, ptl);
141050ad2f24SZach O'Keefe 	if (result == SCAN_SUCCEED) {
141150ad2f24SZach O'Keefe 		result = collapse_huge_page(mm, address, referenced,
141250ad2f24SZach O'Keefe 					    unmapped, cc);
1413c1e8d7c6SMichel Lespinasse 		/* collapse_huge_page will return with the mmap_lock released */
141450ad2f24SZach O'Keefe 		*mmap_locked = false;
1415b46e756fSKirill A. Shutemov 	}
1416b46e756fSKirill A. Shutemov out:
14175c07ebb3SVishal Moola (Oracle) 	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
1418b46e756fSKirill A. Shutemov 				     none_or_zero, result, unmapped);
141950ad2f24SZach O'Keefe 	return result;
1420b46e756fSKirill A. Shutemov }
1421b46e756fSKirill A. Shutemov 
1422b26e2701SQi Zheng static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1423b46e756fSKirill A. Shutemov {
1424b26e2701SQi Zheng 	struct mm_slot *slot = &mm_slot->slot;
1425b26e2701SQi Zheng 	struct mm_struct *mm = slot->mm;
1426b46e756fSKirill A. Shutemov 
142735f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
1428b46e756fSKirill A. Shutemov 
14295dad6048SLance Yang 	if (hpage_collapse_test_exit(mm)) {
1430b46e756fSKirill A. Shutemov 		/* free mm_slot */
1431b26e2701SQi Zheng 		hash_del(&slot->hash);
1432b26e2701SQi Zheng 		list_del(&slot->mm_node);
1433b46e756fSKirill A. Shutemov 
1434b46e756fSKirill A. Shutemov 		/*
1435b46e756fSKirill A. Shutemov 		 * Not strictly needed because the mm exited already.
1436b46e756fSKirill A. Shutemov 		 *
1437b46e756fSKirill A. Shutemov 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1438b46e756fSKirill A. Shutemov 		 */
1439b46e756fSKirill A. Shutemov 
1440b46e756fSKirill A. Shutemov 		/* khugepaged_mm_lock actually not necessary for the below */
1441b26e2701SQi Zheng 		mm_slot_free(mm_slot_cache, mm_slot);
1442b46e756fSKirill A. Shutemov 		mmdrop(mm);
1443b46e756fSKirill A. Shutemov 	}
1444b46e756fSKirill A. Shutemov }
1445b46e756fSKirill A. Shutemov 
1446396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM
14471043173eSHugh Dickins /* hpage must be locked, and mmap_lock must be held */
144834488399SZach O'Keefe static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
144934488399SZach O'Keefe 			pmd_t *pmdp, struct page *hpage)
145034488399SZach O'Keefe {
145134488399SZach O'Keefe 	struct vm_fault vmf = {
145234488399SZach O'Keefe 		.vma = vma,
145334488399SZach O'Keefe 		.address = addr,
145434488399SZach O'Keefe 		.flags = 0,
145534488399SZach O'Keefe 		.pmd = pmdp,
145634488399SZach O'Keefe 	};
145734488399SZach O'Keefe 
145834488399SZach O'Keefe 	VM_BUG_ON(!PageTransHuge(hpage));
14591043173eSHugh Dickins 	mmap_assert_locked(vma->vm_mm);
146034488399SZach O'Keefe 
146134488399SZach O'Keefe 	if (do_set_pmd(&vmf, hpage))
146234488399SZach O'Keefe 		return SCAN_FAIL;
146334488399SZach O'Keefe 
146434488399SZach O'Keefe 	get_page(hpage);
146534488399SZach O'Keefe 	return SCAN_SUCCEED;
146627e1f827SSong Liu }
146727e1f827SSong Liu 
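/*
 * collapse_pte_mapped_thp() is reached both from khugepaged, when its
 * file/shmem scan finds a THP that is still mapped by PTEs
 * (SCAN_PTE_MAPPED_HUGEPAGE), and from the MADV_COLLAPSE path.
 */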
146827e1f827SSong Liu /**
1469336e6b53SAlex Shi  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1470336e6b53SAlex Shi  * address haddr.
1471336e6b53SAlex Shi  *
1472336e6b53SAlex Shi  * @mm: process address space where collapse happens
1473336e6b53SAlex Shi  * @addr: THP collapse address
147434488399SZach O'Keefe  * @install_pmd: If a huge PMD should be installed
147527e1f827SSong Liu  *
147627e1f827SSong Liu  * This function checks whether all the PTEs in the PMD are pointing to the
147727e1f827SSong Liu  * right THP. If so, retract the page table so the THP can refault in
147834488399SZach O'Keefe  * as pmd-mapped. Possibly install a huge PMD mapping the THP.
147927e1f827SSong Liu  */
148034488399SZach O'Keefe int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
148134488399SZach O'Keefe 			    bool install_pmd)
148227e1f827SSong Liu {
14831043173eSHugh Dickins 	struct mmu_notifier_range range;
14841043173eSHugh Dickins 	bool notified = false;
148527e1f827SSong Liu 	unsigned long haddr = addr & HPAGE_PMD_MASK;
148694d815b2SLiam R. Howlett 	struct vm_area_struct *vma = vma_lookup(mm, haddr);
148798b32d29SVishal Moola (Oracle) 	struct folio *folio;
148827e1f827SSong Liu 	pte_t *start_pte, *pte;
14891043173eSHugh Dickins 	pmd_t *pmd, pgt_pmd;
1490a9846049SHugh Dickins 	spinlock_t *pml = NULL, *ptl;
14911043173eSHugh Dickins 	int nr_ptes = 0, result = SCAN_FAIL;
149227e1f827SSong Liu 	int i;
149327e1f827SSong Liu 
14941043173eSHugh Dickins 	mmap_assert_locked(mm);
14951043173eSHugh Dickins 
14961043173eSHugh Dickins 	/* First check VMA found, in case page tables are being torn down */
14971043173eSHugh Dickins 	if (!vma || !vma->vm_file ||
14981043173eSHugh Dickins 	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
14991043173eSHugh Dickins 		return SCAN_VMA_CHECK;
150058ac9a89SZach O'Keefe 
150134488399SZach O'Keefe 	/* Fast check before locking page if already PMD-mapped */
150258ac9a89SZach O'Keefe 	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
150334488399SZach O'Keefe 	if (result == SCAN_PMD_MAPPED)
150434488399SZach O'Keefe 		return result;
150558ac9a89SZach O'Keefe 
150627e1f827SSong Liu 	/*
1507a7f4e6e4SZach O'Keefe 	 * If we are here, we've succeeded in replacing all the native pages
1508a7f4e6e4SZach O'Keefe 	 * in the page cache with a single hugepage. If a mm were to fault-in
1509a7f4e6e4SZach O'Keefe 	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1510a7f4e6e4SZach O'Keefe 	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1511a7f4e6e4SZach O'Keefe 	 * analogously elide sysfs THP settings here.
151227e1f827SSong Liu 	 */
15133485b883SRyan Roberts 	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
15143485b883SRyan Roberts 				     PMD_ORDER))
151534488399SZach O'Keefe 		return SCAN_VMA_CHECK;
151627e1f827SSong Liu 
1517deb4c93aSPeter Xu 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1518deb4c93aSPeter Xu 	if (userfaultfd_wp(vma))
151934488399SZach O'Keefe 		return SCAN_PTE_UFFD_WP;
1520deb4c93aSPeter Xu 
152198b32d29SVishal Moola (Oracle) 	folio = filemap_lock_folio(vma->vm_file->f_mapping,
1522119a5fc1SHugh Dickins 			       linear_page_index(vma, haddr));
152398b32d29SVishal Moola (Oracle) 	if (IS_ERR(folio))
152434488399SZach O'Keefe 		return SCAN_PAGE_NULL;
1525119a5fc1SHugh Dickins 
152698b32d29SVishal Moola (Oracle) 	if (folio_order(folio) != HPAGE_PMD_ORDER) {
152734488399SZach O'Keefe 		result = SCAN_PAGE_COMPOUND;
152898b32d29SVishal Moola (Oracle) 		goto drop_folio;
152934488399SZach O'Keefe 	}
1530780a4b6fSZach O'Keefe 
15311043173eSHugh Dickins 	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
153234488399SZach O'Keefe 	switch (result) {
153334488399SZach O'Keefe 	case SCAN_SUCCEED:
153434488399SZach O'Keefe 		break;
153534488399SZach O'Keefe 	case SCAN_PMD_NONE:
153634488399SZach O'Keefe 		/*
15371d65b771SHugh Dickins 		 * All pte entries have been removed and pmd cleared.
15381d65b771SHugh Dickins 		 * Skip all the pte checks and just update the pmd mapping.
153934488399SZach O'Keefe 		 */
154034488399SZach O'Keefe 		goto maybe_install_pmd;
154134488399SZach O'Keefe 	default:
154298b32d29SVishal Moola (Oracle) 		goto drop_folio;
154334488399SZach O'Keefe 	}
154427e1f827SSong Liu 
154534488399SZach O'Keefe 	result = SCAN_FAIL;
1546895f5ee4SHugh Dickins 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
15471043173eSHugh Dickins 	if (!start_pte)		/* mmap_lock + page lock should prevent this */
154898b32d29SVishal Moola (Oracle) 		goto drop_folio;
154927e1f827SSong Liu 
155027e1f827SSong Liu 	/* step 1: check all mapped PTEs are to the right huge page */
155127e1f827SSong Liu 	for (i = 0, addr = haddr, pte = start_pte;
155227e1f827SSong Liu 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
155327e1f827SSong Liu 		struct page *page;
1554c33c7948SRyan Roberts 		pte_t ptent = ptep_get(pte);
155527e1f827SSong Liu 
155627e1f827SSong Liu 		/* empty pte, skip */
1557c33c7948SRyan Roberts 		if (pte_none(ptent))
155827e1f827SSong Liu 			continue;
155927e1f827SSong Liu 
156027e1f827SSong Liu 		/* page swapped out, abort */
1561c33c7948SRyan Roberts 		if (!pte_present(ptent)) {
156234488399SZach O'Keefe 			result = SCAN_PTE_NON_PRESENT;
156327e1f827SSong Liu 			goto abort;
156434488399SZach O'Keefe 		}
156527e1f827SSong Liu 
1566c33c7948SRyan Roberts 		page = vm_normal_page(vma, addr, ptent);
15673218f871SAlex Sierra 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
15683218f871SAlex Sierra 			page = NULL;
156927e1f827SSong Liu 		/*
1570119a5fc1SHugh Dickins 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1571119a5fc1SHugh Dickins 		 * page table, but the new page will not be a subpage of the THP.
157227e1f827SSong Liu 		 */
157398b32d29SVishal Moola (Oracle) 		if (folio_page(folio, i) != page)
157427e1f827SSong Liu 			goto abort;
157527e1f827SSong Liu 	}
157627e1f827SSong Liu 
15771043173eSHugh Dickins 	pte_unmap_unlock(start_pte, ptl);
15781043173eSHugh Dickins 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
15791043173eSHugh Dickins 				haddr, haddr + HPAGE_PMD_SIZE);
15801043173eSHugh Dickins 	mmu_notifier_invalidate_range_start(&range);
15811043173eSHugh Dickins 	notified = true;
1582a9846049SHugh Dickins 
1583a9846049SHugh Dickins 	/*
1584a9846049SHugh Dickins 	 * pmd_lock covers a wider range than ptl, and (if split from mm's
1585a9846049SHugh Dickins 	 * page_table_lock) ptl nests inside pml. The less time we hold pml,
1586a9846049SHugh Dickins 	 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
1587a9846049SHugh Dickins 	 * inserts a valid as-if-COWed PTE without even looking up page cache.
158898b32d29SVishal Moola (Oracle) 	 * So the folio's page lock does not protect from it; we must not drop
1589a9846049SHugh Dickins 	 * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
1590a9846049SHugh Dickins 	 */
1591a9846049SHugh Dickins 	if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
1592a9846049SHugh Dickins 		pml = pmd_lock(mm, pmd);
1593a9846049SHugh Dickins 
1594a9846049SHugh Dickins 	start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
15951043173eSHugh Dickins 	if (!start_pte)		/* mmap_lock + page lock should prevent this */
15961043173eSHugh Dickins 		goto abort;
1597a9846049SHugh Dickins 	if (!pml)
1598a9846049SHugh Dickins 		spin_lock(ptl);
1599a9846049SHugh Dickins 	else if (ptl != pml)
1600a9846049SHugh Dickins 		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
16011043173eSHugh Dickins 
16021043173eSHugh Dickins 	/* step 2: clear page table and adjust rmap */
160327e1f827SSong Liu 	for (i = 0, addr = haddr, pte = start_pte;
160427e1f827SSong Liu 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
160527e1f827SSong Liu 		struct page *page;
1606c33c7948SRyan Roberts 		pte_t ptent = ptep_get(pte);
160727e1f827SSong Liu 
1608c33c7948SRyan Roberts 		if (pte_none(ptent))
160927e1f827SSong Liu 			continue;
16101043173eSHugh Dickins 		/*
16111043173eSHugh Dickins 		 * We dropped ptl after the first scan, to do the mmu_notifier:
161298b32d29SVishal Moola (Oracle) 		 * page lock stops more PTEs of the folio being faulted in, but
16131043173eSHugh Dickins 		 * does not stop write faults COWing anon copies from existing
16141043173eSHugh Dickins 		 * PTEs; and does not stop those being swapped out or migrated.
16151043173eSHugh Dickins 		 */
16161043173eSHugh Dickins 		if (!pte_present(ptent)) {
16171043173eSHugh Dickins 			result = SCAN_PTE_NON_PRESENT;
16183218f871SAlex Sierra 			goto abort;
16191043173eSHugh Dickins 		}
16201043173eSHugh Dickins 		page = vm_normal_page(vma, addr, ptent);
162198b32d29SVishal Moola (Oracle) 		if (folio_page(folio, i) != page)
16221043173eSHugh Dickins 			goto abort;
16231043173eSHugh Dickins 
16241043173eSHugh Dickins 		/*
16251043173eSHugh Dickins 		 * Must clear entry, or a racing truncate may re-remove it.
16261043173eSHugh Dickins 		 * TLB flush can be left until pmdp_collapse_flush() does it.
16271043173eSHugh Dickins 		 * PTE dirty? Shmem page is already dirty; file is read-only.
16281043173eSHugh Dickins 		 */
16291043173eSHugh Dickins 		ptep_clear(mm, addr, pte);
163035668a43SDavid Hildenbrand 		folio_remove_rmap_pte(folio, page, vma);
16311043173eSHugh Dickins 		nr_ptes++;
163227e1f827SSong Liu 	}
163327e1f827SSong Liu 
1634a9846049SHugh Dickins 	pte_unmap(start_pte);
1635a9846049SHugh Dickins 	if (!pml)
1636a9846049SHugh Dickins 		spin_unlock(ptl);
163727e1f827SSong Liu 
163827e1f827SSong Liu 	/* step 3: set proper refcount and mm_counters. */
16391043173eSHugh Dickins 	if (nr_ptes) {
164098b32d29SVishal Moola (Oracle) 		folio_ref_sub(folio, nr_ptes);
16416b27cc6cSKefeng Wang 		add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
164227e1f827SSong Liu 	}
164327e1f827SSong Liu 
1644a9846049SHugh Dickins 	/* step 4: remove empty page table */
1645a9846049SHugh Dickins 	if (!pml) {
16461043173eSHugh Dickins 		pml = pmd_lock(mm, pmd);
16471043173eSHugh Dickins 		if (ptl != pml)
16481043173eSHugh Dickins 			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1649a9846049SHugh Dickins 	}
16501043173eSHugh Dickins 	pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
16511043173eSHugh Dickins 	pmdp_get_lockless_sync();
16521043173eSHugh Dickins 	if (ptl != pml)
16531043173eSHugh Dickins 		spin_unlock(ptl);
16541043173eSHugh Dickins 	spin_unlock(pml);
1655ab0c3f12SHugh Dickins 
16561043173eSHugh Dickins 	mmu_notifier_invalidate_range_end(&range);
165734488399SZach O'Keefe 
16581043173eSHugh Dickins 	mm_dec_nr_ptes(mm);
16591043173eSHugh Dickins 	page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
16601043173eSHugh Dickins 	pte_free_defer(mm, pmd_pgtable(pgt_pmd));
16618d3c106eSJann Horn 
166234488399SZach O'Keefe maybe_install_pmd:
166334488399SZach O'Keefe 	/* step 5: install pmd entry */
166434488399SZach O'Keefe 	result = install_pmd
166598b32d29SVishal Moola (Oracle) 			? set_huge_pmd(vma, haddr, pmd, &folio->page)
166634488399SZach O'Keefe 			: SCAN_SUCCEED;
166798b32d29SVishal Moola (Oracle) 	goto drop_folio;
16681043173eSHugh Dickins abort:
16691043173eSHugh Dickins 	if (nr_ptes) {
16701043173eSHugh Dickins 		flush_tlb_mm(mm);
167198b32d29SVishal Moola (Oracle) 		folio_ref_sub(folio, nr_ptes);
16726b27cc6cSKefeng Wang 		add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
16731043173eSHugh Dickins 	}
16741043173eSHugh Dickins 	if (start_pte)
16751043173eSHugh Dickins 		pte_unmap_unlock(start_pte, ptl);
1676a9846049SHugh Dickins 	if (pml && pml != ptl)
1677a9846049SHugh Dickins 		spin_unlock(pml);
16781043173eSHugh Dickins 	if (notified)
16791043173eSHugh Dickins 		mmu_notifier_invalidate_range_end(&range);
168098b32d29SVishal Moola (Oracle) drop_folio:
168198b32d29SVishal Moola (Oracle) 	folio_unlock(folio);
168298b32d29SVishal Moola (Oracle) 	folio_put(folio);
168334488399SZach O'Keefe 	return result;
168427e1f827SSong Liu }
168527e1f827SSong Liu 
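/*
 * After a file/shmem range has been collapsed into a huge page, walk every
 * VMA that maps this range of the file and, where it is safe (no anon_vma,
 * no uffd-wp, suitably aligned), remove the now-empty page table so that a
 * later fault can map the THP with a PMD.
 */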
16861d65b771SHugh Dickins static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1687f3f0e1d2SKirill A. Shutemov {
1688f3f0e1d2SKirill A. Shutemov 	struct vm_area_struct *vma;
1689f3f0e1d2SKirill A. Shutemov 
16901d65b771SHugh Dickins 	i_mmap_lock_read(mapping);
1691f3f0e1d2SKirill A. Shutemov 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
16921d65b771SHugh Dickins 		struct mmu_notifier_range range;
16931d65b771SHugh Dickins 		struct mm_struct *mm;
16941d65b771SHugh Dickins 		unsigned long addr;
16951d65b771SHugh Dickins 		pmd_t *pmd, pgt_pmd;
16961d65b771SHugh Dickins 		spinlock_t *pml;
16971d65b771SHugh Dickins 		spinlock_t *ptl;
16981d65b771SHugh Dickins 		bool skipped_uffd = false;
169934488399SZach O'Keefe 
170027e1f827SSong Liu 		/*
170127e1f827SSong Liu 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
17021d65b771SHugh Dickins 		 * got written to. These VMAs are likely not worth removing
17031d65b771SHugh Dickins 		 * page tables from, as PMD-mapping is likely to be split later.
170427e1f827SSong Liu 		 */
17051d65b771SHugh Dickins 		if (READ_ONCE(vma->anon_vma))
17061d65b771SHugh Dickins 			continue;
17071d65b771SHugh Dickins 
1708f3f0e1d2SKirill A. Shutemov 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
170934488399SZach O'Keefe 		if (addr & ~HPAGE_PMD_MASK ||
17101d65b771SHugh Dickins 		    vma->vm_end < addr + HPAGE_PMD_SIZE)
171134488399SZach O'Keefe 			continue;
17121d65b771SHugh Dickins 
17131d65b771SHugh Dickins 		mm = vma->vm_mm;
17141d65b771SHugh Dickins 		if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
17151d65b771SHugh Dickins 			continue;
17161d65b771SHugh Dickins 
17171d65b771SHugh Dickins 		if (hpage_collapse_test_exit(mm))
17181d65b771SHugh Dickins 			continue;
17191d65b771SHugh Dickins 		/*
17201d65b771SHugh Dickins 		 * When a vma is registered with uffd-wp, we cannot recycle
17211d65b771SHugh Dickins 		 * the page table because there may be pte markers installed.
17221d65b771SHugh Dickins 		 * Other vmas can still have the same file mapped hugely, but
17231d65b771SHugh Dickins 		 * skip this one: it will always be mapped in small page size
17241d65b771SHugh Dickins 		 * for uffd-wp registered ranges.
17251d65b771SHugh Dickins 		 */
17261d65b771SHugh Dickins 		if (userfaultfd_wp(vma))
17271d65b771SHugh Dickins 			continue;
17281d65b771SHugh Dickins 
17291d65b771SHugh Dickins 		/* PTEs were notified when unmapped; but now for the PMD? */
17301d65b771SHugh Dickins 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
17311d65b771SHugh Dickins 					addr, addr + HPAGE_PMD_SIZE);
17321d65b771SHugh Dickins 		mmu_notifier_invalidate_range_start(&range);
17331d65b771SHugh Dickins 
17341d65b771SHugh Dickins 		pml = pmd_lock(mm, pmd);
17351d65b771SHugh Dickins 		ptl = pte_lockptr(mm, pmd);
17361d65b771SHugh Dickins 		if (ptl != pml)
17371d65b771SHugh Dickins 			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
17381d65b771SHugh Dickins 
17391d65b771SHugh Dickins 		/*
17401d65b771SHugh Dickins 		 * Huge page lock is still held, so normally the page table
17411d65b771SHugh Dickins 		 * must remain empty; and we have already skipped anon_vma
17421d65b771SHugh Dickins 		 * and userfaultfd_wp() vmas.  But since the mmap_lock is not
17431d65b771SHugh Dickins 		 * held, it is still possible for a racing userfaultfd_ioctl()
17441d65b771SHugh Dickins 		 * to have inserted ptes or markers.  Now that we hold ptlock,
17451d65b771SHugh Dickins 		 * repeating the anon_vma check protects from one category,
17461d65b771SHugh Dickins 		 * and repeating the userfaultfd_wp() check from another.
17471d65b771SHugh Dickins 		 */
17481d65b771SHugh Dickins 		if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
17491d65b771SHugh Dickins 			skipped_uffd = true;
17501d65b771SHugh Dickins 		} else {
17511d65b771SHugh Dickins 			pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
17521d65b771SHugh Dickins 			pmdp_get_lockless_sync();
175334488399SZach O'Keefe 		}
17541d65b771SHugh Dickins 
17551d65b771SHugh Dickins 		if (ptl != pml)
17561d65b771SHugh Dickins 			spin_unlock(ptl);
17571d65b771SHugh Dickins 		spin_unlock(pml);
17581d65b771SHugh Dickins 
17591d65b771SHugh Dickins 		mmu_notifier_invalidate_range_end(&range);
17601d65b771SHugh Dickins 
17611d65b771SHugh Dickins 		if (!skipped_uffd) {
17621d65b771SHugh Dickins 			mm_dec_nr_ptes(mm);
17631d65b771SHugh Dickins 			page_table_check_pte_clear_range(mm, addr, pgt_pmd);
17641d65b771SHugh Dickins 			pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1765f3f0e1d2SKirill A. Shutemov 		}
17661d65b771SHugh Dickins 	}
17671d65b771SHugh Dickins 	i_mmap_unlock_read(mapping);
1768f3f0e1d2SKirill A. Shutemov }
1769f3f0e1d2SKirill A. Shutemov 
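/*
 * Illustrative userspace sketch (not part of this file): a read-only,
 * PMD-aligned file mapping can be collapsed on demand with MADV_COLLAPSE,
 * which ends up in collapse_file() below.  This assumes a 2MB PMD size,
 * that the returned mapping is PMD-aligned, and (for non-shmem files)
 * CONFIG_READ_ONLY_THP_FOR_FS:
 *
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_EXEC,
 *		       MAP_PRIVATE, fd, 0);
 *	if (p != MAP_FAILED)
 *		madvise(p, 2UL << 20, MADV_COLLAPSE);
 */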
1770f3f0e1d2SKirill A. Shutemov /**
177199cb0dbdSSong Liu  * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1772f3f0e1d2SKirill A. Shutemov  *
1773336e6b53SAlex Shi  * @mm: process address space where collapse happens
177434488399SZach O'Keefe  * @addr: virtual collapse start address
1775336e6b53SAlex Shi  * @file: file that the collapse happens on
1776336e6b53SAlex Shi  * @start: collapse start page index in @file
17779710a78aSZach O'Keefe  * @cc: collapse context and scratchpad
1778336e6b53SAlex Shi  *
1779f3f0e1d2SKirill A. Shutemov  * Basic scheme is simple, details are more complex:
178087c460a0SHugh Dickins  *  - allocate and lock a new huge page;
1781a2e17cc2SDavid Stevens  *  - scan page cache, locking old pages
178299cb0dbdSSong Liu  *    + swap/gup in pages if necessary;
1783a2e17cc2SDavid Stevens  *  - copy data to new page
1784a2e17cc2SDavid Stevens  *  - handle shmem holes
1785a2e17cc2SDavid Stevens  *    + re-validate that holes weren't filled by someone else
1786a2e17cc2SDavid Stevens  *    + check for userfaultfd
1787ac492b9cSDavid Stevens  *  - finalize updates to the page cache;
178877da9389SMatthew Wilcox  *  - if replacing succeeds:
178987c460a0SHugh Dickins  *    + unlock huge page;
1790a2e17cc2SDavid Stevens  *    + free old pages;
1791f3f0e1d2SKirill A. Shutemov  *  - if replacing failed:
1792a2e17cc2SDavid Stevens  *    + unlock old pages
179387c460a0SHugh Dickins  *    + unlock and free huge page;
1794f3f0e1d2SKirill A. Shutemov  */
179534488399SZach O'Keefe static int collapse_file(struct mm_struct *mm, unsigned long addr,
1796579c571eSSong Liu 			 struct file *file, pgoff_t start,
179734488399SZach O'Keefe 			 struct collapse_control *cc)
1798f3f0e1d2SKirill A. Shutemov {
1799579c571eSSong Liu 	struct address_space *mapping = file->f_mapping;
180050ad2f24SZach O'Keefe 	struct page *hpage;
180112904d95SJiaqi Yan 	struct page *page;
180212904d95SJiaqi Yan 	struct page *tmp;
180312904d95SJiaqi Yan 	struct folio *folio;
18044c9473e8SGautam Menghani 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1805f3f0e1d2SKirill A. Shutemov 	LIST_HEAD(pagelist);
180677da9389SMatthew Wilcox 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1807f3f0e1d2SKirill A. Shutemov 	int nr_none = 0, result = SCAN_SUCCEED;
180899cb0dbdSSong Liu 	bool is_shmem = shmem_file(file);
18094c9473e8SGautam Menghani 	int nr = 0;
1810f3f0e1d2SKirill A. Shutemov 
181199cb0dbdSSong Liu 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1812f3f0e1d2SKirill A. Shutemov 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1813f3f0e1d2SKirill A. Shutemov 
181450ad2f24SZach O'Keefe 	result = alloc_charge_hpage(&hpage, mm, cc);
18159710a78aSZach O'Keefe 	if (result != SCAN_SUCCEED)
1816f3f0e1d2SKirill A. Shutemov 		goto out;
1817f3f0e1d2SKirill A. Shutemov 
1818cae106ddSDavid Stevens 	__SetPageLocked(hpage);
1819cae106ddSDavid Stevens 	if (is_shmem)
1820cae106ddSDavid Stevens 		__SetPageSwapBacked(hpage);
1821cae106ddSDavid Stevens 	hpage->index = start;
1822cae106ddSDavid Stevens 	hpage->mapping = mapping;
1823cae106ddSDavid Stevens 
18246b24ca4aSMatthew Wilcox (Oracle) 	/*
18256b24ca4aSMatthew Wilcox (Oracle) 	 * Ensure we have slots for all the pages in the range.  This is
18266b24ca4aSMatthew Wilcox (Oracle) 	 * almost certainly a no-op because most of the pages must be present.
18276b24ca4aSMatthew Wilcox (Oracle) 	 */
182895feeabbSHugh Dickins 	do {
182995feeabbSHugh Dickins 		xas_lock_irq(&xas);
183095feeabbSHugh Dickins 		xas_create_range(&xas);
183195feeabbSHugh Dickins 		if (!xas_error(&xas))
183295feeabbSHugh Dickins 			break;
183395feeabbSHugh Dickins 		xas_unlock_irq(&xas);
183495feeabbSHugh Dickins 		if (!xas_nomem(&xas, GFP_KERNEL)) {
183595feeabbSHugh Dickins 			result = SCAN_FAIL;
1836cae106ddSDavid Stevens 			goto rollback;
183795feeabbSHugh Dickins 		}
183895feeabbSHugh Dickins 	} while (1);
183995feeabbSHugh Dickins 
184077da9389SMatthew Wilcox 	for (index = start; index < end; index++) {
1841e8c716bcSHugh Dickins 		xas_set(&xas, index);
1842e8c716bcSHugh Dickins 		page = xas_load(&xas);
184377da9389SMatthew Wilcox 
184477da9389SMatthew Wilcox 		VM_BUG_ON(index != xas.xa_index);
184599cb0dbdSSong Liu 		if (is_shmem) {
184677da9389SMatthew Wilcox 			if (!page) {
1847701270faSHugh Dickins 				/*
184899cb0dbdSSong Liu 				 * Stop if extent has been truncated or
184999cb0dbdSSong Liu 				 * hole-punched, and is now completely
185099cb0dbdSSong Liu 				 * empty.
1851701270faSHugh Dickins 				 */
1852701270faSHugh Dickins 				if (index == start) {
1853701270faSHugh Dickins 					if (!xas_next_entry(&xas, end - 1)) {
1854701270faSHugh Dickins 						result = SCAN_TRUNCATED;
1855042a3082SHugh Dickins 						goto xa_locked;
1856701270faSHugh Dickins 					}
1857701270faSHugh Dickins 				}
185877da9389SMatthew Wilcox 				nr_none++;
185977da9389SMatthew Wilcox 				continue;
1860f3f0e1d2SKirill A. Shutemov 			}
1861f3f0e1d2SKirill A. Shutemov 
18623159f943SMatthew Wilcox 			if (xa_is_value(page) || !PageUptodate(page)) {
186377da9389SMatthew Wilcox 				xas_unlock_irq(&xas);
1864f3f0e1d2SKirill A. Shutemov 				/* swap in or instantiate fallocated page */
18657459c149SMatthew Wilcox (Oracle) 				if (shmem_get_folio(mapping->host, index,
18667459c149SMatthew Wilcox (Oracle) 						&folio, SGP_NOALLOC)) {
1867f3f0e1d2SKirill A. Shutemov 					result = SCAN_FAIL;
186877da9389SMatthew Wilcox 					goto xa_unlocked;
1869f3f0e1d2SKirill A. Shutemov 				}
18701fec6890SMatthew Wilcox (Oracle) 				/* drain lru cache to help isolate_lru_page() */
1871efa3d814SDavid Stevens 				lru_add_drain();
18727459c149SMatthew Wilcox (Oracle) 				page = folio_file_page(folio, index);
1873f3f0e1d2SKirill A. Shutemov 			} else if (trylock_page(page)) {
1874f3f0e1d2SKirill A. Shutemov 				get_page(page);
1875042a3082SHugh Dickins 				xas_unlock_irq(&xas);
1876f3f0e1d2SKirill A. Shutemov 			} else {
1877f3f0e1d2SKirill A. Shutemov 				result = SCAN_PAGE_LOCK;
1878042a3082SHugh Dickins 				goto xa_locked;
1879f3f0e1d2SKirill A. Shutemov 			}
188099cb0dbdSSong Liu 		} else {	/* !is_shmem */
188199cb0dbdSSong Liu 			if (!page || xa_is_value(page)) {
188299cb0dbdSSong Liu 				xas_unlock_irq(&xas);
188399cb0dbdSSong Liu 				page_cache_sync_readahead(mapping, &file->f_ra,
188499cb0dbdSSong Liu 							  file, index,
1885e5a59d30SDavid Howells 							  end - index);
18861fec6890SMatthew Wilcox (Oracle) 				/* drain lru cache to help isolate_lru_page() */
188799cb0dbdSSong Liu 				lru_add_drain();
188899cb0dbdSSong Liu 				page = find_lock_page(mapping, index);
188999cb0dbdSSong Liu 				if (unlikely(page == NULL)) {
189099cb0dbdSSong Liu 					result = SCAN_FAIL;
189199cb0dbdSSong Liu 					goto xa_unlocked;
189299cb0dbdSSong Liu 				}
189375f36069SSong Liu 			} else if (PageDirty(page)) {
189475f36069SSong Liu 				/*
189575f36069SSong Liu 				 * khugepaged only works on read-only fd,
189675f36069SSong Liu 				 * so this page is dirty because it hasn't
189775f36069SSong Liu 				 * been flushed since first write. There
189875f36069SSong Liu 				 * won't be new dirty pages.
189975f36069SSong Liu 				 *
190075f36069SSong Liu 				 * Trigger async flush here and hope the
190175f36069SSong Liu 				 * writeback is done when khugepaged
190275f36069SSong Liu 				 * revisits this page.
190375f36069SSong Liu 				 *
190475f36069SSong Liu 				 * This is a one-off situation. We are not
190575f36069SSong Liu 				 * forcing writeback in a loop.
190675f36069SSong Liu 				 */
190775f36069SSong Liu 				xas_unlock_irq(&xas);
190875f36069SSong Liu 				filemap_flush(mapping);
190975f36069SSong Liu 				result = SCAN_FAIL;
191075f36069SSong Liu 				goto xa_unlocked;
191174c42e1bSRongwei Wang 			} else if (PageWriteback(page)) {
191274c42e1bSRongwei Wang 				xas_unlock_irq(&xas);
191374c42e1bSRongwei Wang 				result = SCAN_FAIL;
191474c42e1bSRongwei Wang 				goto xa_unlocked;
191599cb0dbdSSong Liu 			} else if (trylock_page(page)) {
191699cb0dbdSSong Liu 				get_page(page);
191799cb0dbdSSong Liu 				xas_unlock_irq(&xas);
191899cb0dbdSSong Liu 			} else {
191999cb0dbdSSong Liu 				result = SCAN_PAGE_LOCK;
192099cb0dbdSSong Liu 				goto xa_locked;
192199cb0dbdSSong Liu 			}
192299cb0dbdSSong Liu 		}
1923f3f0e1d2SKirill A. Shutemov 
1924f3f0e1d2SKirill A. Shutemov 		/*
1925b93b0163SMatthew Wilcox 		 * The page must be locked, so we can drop the i_pages lock
1926f3f0e1d2SKirill A. Shutemov 		 * without racing with truncate.
1927f3f0e1d2SKirill A. Shutemov 		 */
1928f3f0e1d2SKirill A. Shutemov 		VM_BUG_ON_PAGE(!PageLocked(page), page);
19294655e5e5SSong Liu 
19304655e5e5SSong Liu 		/* make sure the page is up to date */
19314655e5e5SSong Liu 		if (unlikely(!PageUptodate(page))) {
19324655e5e5SSong Liu 			result = SCAN_FAIL;
19334655e5e5SSong Liu 			goto out_unlock;
19344655e5e5SSong Liu 		}
193506a5e126SHugh Dickins 
193606a5e126SHugh Dickins 		/*
193706a5e126SHugh Dickins 		 * If file was truncated then extended, or hole-punched, before
193806a5e126SHugh Dickins 		 * we locked the first page, then a THP might be there already.
193958ac9a89SZach O'Keefe 		 * This will be discovered on the first iteration.
194006a5e126SHugh Dickins 		 */
194106a5e126SHugh Dickins 		if (PageTransCompound(page)) {
194258ac9a89SZach O'Keefe 			struct page *head = compound_head(page);
194358ac9a89SZach O'Keefe 
194458ac9a89SZach O'Keefe 			result = compound_order(head) == HPAGE_PMD_ORDER &&
194558ac9a89SZach O'Keefe 					head->index == start
194658ac9a89SZach O'Keefe 					/* Maybe PMD-mapped */
194758ac9a89SZach O'Keefe 					? SCAN_PTE_MAPPED_HUGEPAGE
194858ac9a89SZach O'Keefe 					: SCAN_PAGE_COMPOUND;
194906a5e126SHugh Dickins 			goto out_unlock;
195006a5e126SHugh Dickins 		}
1951f3f0e1d2SKirill A. Shutemov 
195264ab3195SVishal Moola (Oracle) 		folio = page_folio(page);
195364ab3195SVishal Moola (Oracle) 
195464ab3195SVishal Moola (Oracle) 		if (folio_mapping(folio) != mapping) {
1955f3f0e1d2SKirill A. Shutemov 			result = SCAN_TRUNCATED;
1956f3f0e1d2SKirill A. Shutemov 			goto out_unlock;
1957f3f0e1d2SKirill A. Shutemov 		}
1958f3f0e1d2SKirill A. Shutemov 
195964ab3195SVishal Moola (Oracle) 		if (!is_shmem && (folio_test_dirty(folio) ||
196064ab3195SVishal Moola (Oracle) 				  folio_test_writeback(folio))) {
19614655e5e5SSong Liu 			/*
19624655e5e5SSong Liu 			 * khugepaged only works on read-only fd, so this
19634655e5e5SSong Liu 			 * page is dirty because it hasn't been flushed
19644655e5e5SSong Liu 			 * since first write.
19654655e5e5SSong Liu 			 */
19664655e5e5SSong Liu 			result = SCAN_FAIL;
19674655e5e5SSong Liu 			goto out_unlock;
19684655e5e5SSong Liu 		}
19694655e5e5SSong Liu 
1970be2d5756SBaolin Wang 		if (!folio_isolate_lru(folio)) {
1971f3f0e1d2SKirill A. Shutemov 			result = SCAN_DEL_PAGE_LRU;
1972042a3082SHugh Dickins 			goto out_unlock;
1973f3f0e1d2SKirill A. Shutemov 		}
1974f3f0e1d2SKirill A. Shutemov 
19750201ebf2SDavid Howells 		if (!filemap_release_folio(folio, GFP_KERNEL)) {
197699cb0dbdSSong Liu 			result = SCAN_PAGE_HAS_PRIVATE;
197764ab3195SVishal Moola (Oracle) 			folio_putback_lru(folio);
197899cb0dbdSSong Liu 			goto out_unlock;
197999cb0dbdSSong Liu 		}
198099cb0dbdSSong Liu 
198164ab3195SVishal Moola (Oracle) 		if (folio_mapped(folio))
198264ab3195SVishal Moola (Oracle) 			try_to_unmap(folio,
1983869f7ee6SMatthew Wilcox (Oracle) 					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1984f3f0e1d2SKirill A. Shutemov 
198577da9389SMatthew Wilcox 		xas_lock_irq(&xas);
1986f3f0e1d2SKirill A. Shutemov 
1987e8c716bcSHugh Dickins 		VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);
1988f3f0e1d2SKirill A. Shutemov 
1989f3f0e1d2SKirill A. Shutemov 		/*
1990a2e17cc2SDavid Stevens 		 * We control three references to the page:
1991f3f0e1d2SKirill A. Shutemov 		 *  - we hold a pin on it;
199277da9389SMatthew Wilcox 		 *  - one reference from page cache;
1993f3f0e1d2SKirill A. Shutemov 		 *  - one from isolate_lru_page;
1994a2e17cc2SDavid Stevens 		 * If those are the only references, then any new usage of the
1995a2e17cc2SDavid Stevens 		 * page will have to fetch it from the page cache. That requires
1996a2e17cc2SDavid Stevens 		 * locking the page to handle truncate, so any new usage will be
1997a2e17cc2SDavid Stevens 		 * blocked until we unlock the page after collapse/during rollback.
1998f3f0e1d2SKirill A. Shutemov 		 */
1999a2e17cc2SDavid Stevens 		if (page_count(page) != 3) {
2000f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
2001042a3082SHugh Dickins 			xas_unlock_irq(&xas);
2002042a3082SHugh Dickins 			putback_lru_page(page);
2003042a3082SHugh Dickins 			goto out_unlock;
2004f3f0e1d2SKirill A. Shutemov 		}
2005f3f0e1d2SKirill A. Shutemov 
2006f3f0e1d2SKirill A. Shutemov 		/*
2007a2e17cc2SDavid Stevens 		 * Accumulate the pages that are being collapsed.
2008f3f0e1d2SKirill A. Shutemov 		 */
2009f3f0e1d2SKirill A. Shutemov 		list_add_tail(&page->lru, &pagelist);
2010f3f0e1d2SKirill A. Shutemov 		continue;
2011f3f0e1d2SKirill A. Shutemov out_unlock:
2012f3f0e1d2SKirill A. Shutemov 		unlock_page(page);
2013f3f0e1d2SKirill A. Shutemov 		put_page(page);
2014042a3082SHugh Dickins 		goto xa_unlocked;
2015f3f0e1d2SKirill A. Shutemov 	}
2016f3f0e1d2SKirill A. Shutemov 
201712904d95SJiaqi Yan 	if (!is_shmem) {
201809d91cdaSSong Liu 		filemap_nr_thps_inc(mapping);
2019eb6ecbedSCollin Fijalkovich 		/*
2020eb6ecbedSCollin Fijalkovich 		 * Paired with smp_mb() in do_dentry_open() to ensure
2021eb6ecbedSCollin Fijalkovich 		 * i_writecount is up to date and the update to nr_thps is
2022eb6ecbedSCollin Fijalkovich 		 * visible. Ensures the page cache will be truncated if the
2023eb6ecbedSCollin Fijalkovich 		 * file is opened writable.
2024eb6ecbedSCollin Fijalkovich 		 */
2025eb6ecbedSCollin Fijalkovich 		smp_mb();
2026eb6ecbedSCollin Fijalkovich 		if (inode_is_open_for_write(mapping->host)) {
2027eb6ecbedSCollin Fijalkovich 			result = SCAN_FAIL;
2028eb6ecbedSCollin Fijalkovich 			filemap_nr_thps_dec(mapping);
2029eb6ecbedSCollin Fijalkovich 		}
203009d91cdaSSong Liu 	}
203199cb0dbdSSong Liu 
2032042a3082SHugh Dickins xa_locked:
2033042a3082SHugh Dickins 	xas_unlock_irq(&xas);
203477da9389SMatthew Wilcox xa_unlocked:
2035042a3082SHugh Dickins 
20366d9df8a5SHugh Dickins 	/*
20376d9df8a5SHugh Dickins 	 * If collapse is successful, flush must be done now before copying.
20386d9df8a5SHugh Dickins 	 * If collapse is unsuccessful, does flush actually need to be done?
20396d9df8a5SHugh Dickins 	 * Do it anyway, to clear the state.
20406d9df8a5SHugh Dickins 	 */
20416d9df8a5SHugh Dickins 	try_to_unmap_flush();
20426d9df8a5SHugh Dickins 
2043509f0069SHugh Dickins 	if (result == SCAN_SUCCEED && nr_none &&
2044509f0069SHugh Dickins 	    !shmem_charge(mapping->host, nr_none))
2045509f0069SHugh Dickins 		result = SCAN_FAIL;
2046509f0069SHugh Dickins 	if (result != SCAN_SUCCEED) {
2047509f0069SHugh Dickins 		nr_none = 0;
2048cae106ddSDavid Stevens 		goto rollback;
2049509f0069SHugh Dickins 	}
2050cae106ddSDavid Stevens 
2051f3f0e1d2SKirill A. Shutemov 	/*
2052a2e17cc2SDavid Stevens 	 * The old pages are locked, so they won't change anymore.
2053f3f0e1d2SKirill A. Shutemov 	 */
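	/*
	 * Copy each old page into its subpage of the new huge page, clearing
	 * the subpages that correspond to holes in the range; bail out if a
	 * machine check is hit while copying (SCAN_COPY_MC).
	 */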
20542af8ff29SHugh Dickins 	index = start;
205512904d95SJiaqi Yan 	list_for_each_entry(page, &pagelist, lru) {
20562af8ff29SHugh Dickins 		while (index < page->index) {
205750ad2f24SZach O'Keefe 			clear_highpage(hpage + (index % HPAGE_PMD_NR));
20582af8ff29SHugh Dickins 			index++;
20592af8ff29SHugh Dickins 		}
2060cae106ddSDavid Stevens 		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
206112904d95SJiaqi Yan 			result = SCAN_COPY_MC;
2062cae106ddSDavid Stevens 			goto rollback;
206312904d95SJiaqi Yan 		}
206412904d95SJiaqi Yan 		index++;
206512904d95SJiaqi Yan 	}
2066cae106ddSDavid Stevens 	while (index < end) {
206712904d95SJiaqi Yan 		clear_highpage(hpage + (index % HPAGE_PMD_NR));
206812904d95SJiaqi Yan 		index++;
206912904d95SJiaqi Yan 	}
207012904d95SJiaqi Yan 
2071ac492b9cSDavid Stevens 	if (nr_none) {
2072ac492b9cSDavid Stevens 		struct vm_area_struct *vma;
2073ac492b9cSDavid Stevens 		int nr_none_check = 0;
2074ac492b9cSDavid Stevens 
2075ac492b9cSDavid Stevens 		i_mmap_lock_read(mapping);
2076ac492b9cSDavid Stevens 		xas_lock_irq(&xas);
2077ac492b9cSDavid Stevens 
2078ac492b9cSDavid Stevens 		xas_set(&xas, start);
2079ac492b9cSDavid Stevens 		for (index = start; index < end; index++) {
2080ac492b9cSDavid Stevens 			if (!xas_next(&xas)) {
2081ac492b9cSDavid Stevens 				xas_store(&xas, XA_RETRY_ENTRY);
2082ac492b9cSDavid Stevens 				if (xas_error(&xas)) {
2083ac492b9cSDavid Stevens 					result = SCAN_STORE_FAILED;
2084ac492b9cSDavid Stevens 					goto immap_locked;
2085ac492b9cSDavid Stevens 				}
2086ac492b9cSDavid Stevens 				nr_none_check++;
2087ac492b9cSDavid Stevens 			}
2088ac492b9cSDavid Stevens 		}
2089ac492b9cSDavid Stevens 
2090ac492b9cSDavid Stevens 		if (nr_none != nr_none_check) {
2091ac492b9cSDavid Stevens 			result = SCAN_PAGE_FILLED;
2092ac492b9cSDavid Stevens 			goto immap_locked;
2093ac492b9cSDavid Stevens 		}
2094ac492b9cSDavid Stevens 
209512904d95SJiaqi Yan 		/*
2096ac492b9cSDavid Stevens 		 * If userspace observed a missing page in a VMA with a MODE_MISSING
2097ac492b9cSDavid Stevens 		 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
2098ac492b9cSDavid Stevens 		 * page. If so, we need to roll back to avoid suppressing such an
2099ac492b9cSDavid Stevens 		 * event. Wp/minor userfaultfds don't give userspace any guarantee
2100ac492b9cSDavid Stevens 		 * that the kernel won't fill a missing page with a zero page, so
2101ac492b9cSDavid Stevens 		 * they don't matter here.
2102ac492b9cSDavid Stevens 		 *
2103ac492b9cSDavid Stevens 		 * Any userfaultfds registered after this point will not be able to
2104ac492b9cSDavid Stevens 		 * observe any missing pages due to the previously inserted retry
2105ac492b9cSDavid Stevens 		 * entries.
210612904d95SJiaqi Yan 		 */
2107ac492b9cSDavid Stevens 		vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2108ac492b9cSDavid Stevens 			if (userfaultfd_missing(vma)) {
2109ac492b9cSDavid Stevens 				result = SCAN_EXCEED_NONE_PTE;
2110ac492b9cSDavid Stevens 				goto immap_locked;
2111ac492b9cSDavid Stevens 			}
2112ac492b9cSDavid Stevens 		}
2113ac492b9cSDavid Stevens 
2114ac492b9cSDavid Stevens immap_locked:
2115ac492b9cSDavid Stevens 		i_mmap_unlock_read(mapping);
2116ac492b9cSDavid Stevens 		if (result != SCAN_SUCCEED) {
2117ac492b9cSDavid Stevens 			xas_set(&xas, start);
2118ac492b9cSDavid Stevens 			for (index = start; index < end; index++) {
2119ac492b9cSDavid Stevens 				if (xas_next(&xas) == XA_RETRY_ENTRY)
2120ac492b9cSDavid Stevens 					xas_store(&xas, NULL);
2121ac492b9cSDavid Stevens 			}
2122ac492b9cSDavid Stevens 
2123ac492b9cSDavid Stevens 			xas_unlock_irq(&xas);
2124ac492b9cSDavid Stevens 			goto rollback;
2125ac492b9cSDavid Stevens 		}
2126ac492b9cSDavid Stevens 	} else {
2127ac492b9cSDavid Stevens 		xas_lock_irq(&xas);
21282af8ff29SHugh Dickins 	}
212912904d95SJiaqi Yan 
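	/*
	 * Both branches above end with the xarray lock held; update the
	 * THP and shmem counters before publishing the huge folio.
	 */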
2130b54d60b1SMatthew Wilcox (Oracle) 	folio = page_folio(hpage);
2131b54d60b1SMatthew Wilcox (Oracle) 	nr = folio_nr_pages(folio);
213212904d95SJiaqi Yan 	if (is_shmem)
2133b54d60b1SMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
213412904d95SJiaqi Yan 	else
2135b54d60b1SMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr);
213612904d95SJiaqi Yan 
213712904d95SJiaqi Yan 	if (nr_none) {
2138b54d60b1SMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_none);
213912904d95SJiaqi Yan 		/* nr_none is always 0 for non-shmem. */
2140b54d60b1SMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_none);
2141f3f0e1d2SKirill A. Shutemov 	}
2142f3f0e1d2SKirill A. Shutemov 
2143a2e17cc2SDavid Stevens 	/*
2144a2e17cc2SDavid Stevens 	 * Mark hpage as uptodate before inserting it into the page cache so
2145a2e17cc2SDavid Stevens 	 * that it isn't mistaken for a fallocated but unwritten page.
2146a2e17cc2SDavid Stevens 	 */
2147284a344eSVishal Moola (Oracle) 	folio_mark_uptodate(folio);
2148284a344eSVishal Moola (Oracle) 	folio_ref_add(folio, HPAGE_PMD_NR - 1);
2149284a344eSVishal Moola (Oracle) 
21506058eaecSJohannes Weiner 	if (is_shmem)
2151284a344eSVishal Moola (Oracle) 		folio_mark_dirty(folio);
2152284a344eSVishal Moola (Oracle) 	folio_add_lru(folio);
2153f3f0e1d2SKirill A. Shutemov 
2154a2e17cc2SDavid Stevens 	/* Join all the small entries into a single multi-index entry. */
2155a2e17cc2SDavid Stevens 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2156b54d60b1SMatthew Wilcox (Oracle) 	xas_store(&xas, folio);
21570175ab61SHugh Dickins 	WARN_ON_ONCE(xas_error(&xas));
2158a2e17cc2SDavid Stevens 	xas_unlock_irq(&xas);
2159a2e17cc2SDavid Stevens 
2160042a3082SHugh Dickins 	/*
2161042a3082SHugh Dickins 	 * Remove pte page tables, so we can re-fault the page as huge.
21621d65b771SHugh Dickins 	 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
2163042a3082SHugh Dickins 	 */
21641d65b771SHugh Dickins 	retract_page_tables(mapping, start);
21651d65b771SHugh Dickins 	if (cc && !cc->is_khugepaged)
21661d65b771SHugh Dickins 		result = SCAN_PTE_MAPPED_HUGEPAGE;
2167b54d60b1SMatthew Wilcox (Oracle) 	folio_unlock(folio);
2168ac492b9cSDavid Stevens 
2169ac492b9cSDavid Stevens 	/*
2170ac492b9cSDavid Stevens 	 * The collapse has succeeded, so free the old pages.
2171ac492b9cSDavid Stevens 	 */
2172ac492b9cSDavid Stevens 	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2173ac492b9cSDavid Stevens 		list_del(&page->lru);
2174ac492b9cSDavid Stevens 		page->mapping = NULL;
2175ac492b9cSDavid Stevens 		ClearPageActive(page);
2176ac492b9cSDavid Stevens 		ClearPageUnevictable(page);
2177ac492b9cSDavid Stevens 		unlock_page(page);
2178a2e17cc2SDavid Stevens 		folio_put_refs(page_folio(page), 3);
2179ac492b9cSDavid Stevens 	}
2180ac492b9cSDavid Stevens 
2181cae106ddSDavid Stevens 	goto out;
2182cae106ddSDavid Stevens 
2183cae106ddSDavid Stevens rollback:
218477da9389SMatthew Wilcox 	/* Something went wrong: roll back page cache changes */
21852f55f070SMiaohe Lin 	if (nr_none) {
2186a2e17cc2SDavid Stevens 		xas_lock_irq(&xas);
2187aaa52e34SHugh Dickins 		mapping->nrpages -= nr_none;
218877da9389SMatthew Wilcox 		xas_unlock_irq(&xas);
2189509f0069SHugh Dickins 		shmem_uncharge(mapping->host, nr_none);
2190a2e17cc2SDavid Stevens 	}
2191a2e17cc2SDavid Stevens 
2192a2e17cc2SDavid Stevens 	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2193a2e17cc2SDavid Stevens 		list_del(&page->lru);
2194f3f0e1d2SKirill A. Shutemov 		unlock_page(page);
2195042a3082SHugh Dickins 		putback_lru_page(page);
2196a2e17cc2SDavid Stevens 		put_page(page);
2197f3f0e1d2SKirill A. Shutemov 	}
219812904d95SJiaqi Yan 	/*
219912904d95SJiaqi Yan 	 * Undo the update of filemap_nr_thps_inc for non-SHMEM
220012904d95SJiaqi Yan 	 * files only. This undo is only needed when the failure is
220112904d95SJiaqi Yan 	 * due to SCAN_COPY_MC.
220212904d95SJiaqi Yan 	 */
220312904d95SJiaqi Yan 	if (!is_shmem && result == SCAN_COPY_MC) {
220412904d95SJiaqi Yan 		filemap_nr_thps_dec(mapping);
220512904d95SJiaqi Yan 		/*
220612904d95SJiaqi Yan 		 * Paired with smp_mb() in do_dentry_open() to
220712904d95SJiaqi Yan 		 * ensure the update to nr_thps is visible.
220812904d95SJiaqi Yan 		 */
220912904d95SJiaqi Yan 		smp_mb();
221012904d95SJiaqi Yan 	}
221112904d95SJiaqi Yan 
221250ad2f24SZach O'Keefe 	hpage->mapping = NULL;
2213042a3082SHugh Dickins 
221450ad2f24SZach O'Keefe 	unlock_page(hpage);
2215cae106ddSDavid Stevens 	put_page(hpage);
2216f3f0e1d2SKirill A. Shutemov out:
2217f3f0e1d2SKirill A. Shutemov 	VM_BUG_ON(!list_empty(&pagelist));
22184c9473e8SGautam Menghani 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
221950ad2f24SZach O'Keefe 	return result;
2220f3f0e1d2SKirill A. Shutemov }
2221f3f0e1d2SKirill A. Shutemov 
222234488399SZach O'Keefe static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
222334488399SZach O'Keefe 				    struct file *file, pgoff_t start,
222434488399SZach O'Keefe 				    struct collapse_control *cc)
2225f3f0e1d2SKirill A. Shutemov {
2226f3f0e1d2SKirill A. Shutemov 	struct page *page = NULL;
2227579c571eSSong Liu 	struct address_space *mapping = file->f_mapping;
222885b392dbSMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, start);
2229f3f0e1d2SKirill A. Shutemov 	int present, swap;
2230f3f0e1d2SKirill A. Shutemov 	int node = NUMA_NO_NODE;
2231f3f0e1d2SKirill A. Shutemov 	int result = SCAN_SUCCEED;
2232f3f0e1d2SKirill A. Shutemov 
2233f3f0e1d2SKirill A. Shutemov 	present = 0;
2234f3f0e1d2SKirill A. Shutemov 	swap = 0;
223534d6b470SZach O'Keefe 	memset(cc->node_load, 0, sizeof(cc->node_load));
2236e031ff96SYang Shi 	nodes_clear(cc->alloc_nmask);
2237f3f0e1d2SKirill A. Shutemov 	rcu_read_lock();
223885b392dbSMatthew Wilcox 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
223985b392dbSMatthew Wilcox 		if (xas_retry(&xas, page))
2240f3f0e1d2SKirill A. Shutemov 			continue;
2241f3f0e1d2SKirill A. Shutemov 
224285b392dbSMatthew Wilcox 		if (xa_is_value(page)) {
2243d8ea7cc8SZach O'Keefe 			++swap;
2244d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
2245d8ea7cc8SZach O'Keefe 			    swap > khugepaged_max_ptes_swap) {
2246f3f0e1d2SKirill A. Shutemov 				result = SCAN_EXCEED_SWAP_PTE;
2247e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2248f3f0e1d2SKirill A. Shutemov 				break;
2249f3f0e1d2SKirill A. Shutemov 			}
2250f3f0e1d2SKirill A. Shutemov 			continue;
2251f3f0e1d2SKirill A. Shutemov 		}
2252f3f0e1d2SKirill A. Shutemov 
22536b24ca4aSMatthew Wilcox (Oracle) 		/*
225458ac9a89SZach O'Keefe 		 * TODO: khugepaged should compact smaller compound pages
22556b24ca4aSMatthew Wilcox (Oracle) 		 * into a PMD-sized page
22566b24ca4aSMatthew Wilcox (Oracle) 		 */
2257f3f0e1d2SKirill A. Shutemov 		if (PageTransCompound(page)) {
225858ac9a89SZach O'Keefe 			struct page *head = compound_head(page);
225958ac9a89SZach O'Keefe 
226058ac9a89SZach O'Keefe 			result = compound_order(head) == HPAGE_PMD_ORDER &&
226158ac9a89SZach O'Keefe 					head->index == start
226258ac9a89SZach O'Keefe 					/* Maybe PMD-mapped */
226358ac9a89SZach O'Keefe 					? SCAN_PTE_MAPPED_HUGEPAGE
226458ac9a89SZach O'Keefe 					: SCAN_PAGE_COMPOUND;
226558ac9a89SZach O'Keefe 			/*
226658ac9a89SZach O'Keefe 			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
226758ac9a89SZach O'Keefe 			 * by the caller won't touch the page cache, and so
226858ac9a89SZach O'Keefe 			 * it's safe to skip LRU and refcount checks before
226958ac9a89SZach O'Keefe 			 * returning.
227058ac9a89SZach O'Keefe 			 */
2271f3f0e1d2SKirill A. Shutemov 			break;
2272f3f0e1d2SKirill A. Shutemov 		}
2273f3f0e1d2SKirill A. Shutemov 
2274f3f0e1d2SKirill A. Shutemov 		node = page_to_nid(page);
22757d2c4385SZach O'Keefe 		if (hpage_collapse_scan_abort(node, cc)) {
2276f3f0e1d2SKirill A. Shutemov 			result = SCAN_SCAN_ABORT;
2277f3f0e1d2SKirill A. Shutemov 			break;
2278f3f0e1d2SKirill A. Shutemov 		}
227934d6b470SZach O'Keefe 		cc->node_load[node]++;
2280f3f0e1d2SKirill A. Shutemov 
2281f3f0e1d2SKirill A. Shutemov 		if (!PageLRU(page)) {
2282f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_LRU;
2283f3f0e1d2SKirill A. Shutemov 			break;
2284f3f0e1d2SKirill A. Shutemov 		}
2285f3f0e1d2SKirill A. Shutemov 
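		/*
		 * Expect only the page cache reference, the page table
		 * mappings and any private (e.g. buffer head) reference;
		 * anything extra means another user holds the page, so
		 * give up on this range.
		 */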
228699cb0dbdSSong Liu 		if (page_count(page) !=
228799cb0dbdSSong Liu 		    1 + page_mapcount(page) + page_has_private(page)) {
2288f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
2289f3f0e1d2SKirill A. Shutemov 			break;
2290f3f0e1d2SKirill A. Shutemov 		}
2291f3f0e1d2SKirill A. Shutemov 
2292f3f0e1d2SKirill A. Shutemov 		/*
2293f3f0e1d2SKirill A. Shutemov 		 * We probably should check if the page is referenced here, but
2294f3f0e1d2SKirill A. Shutemov 		 * nobody would transfer pte_young() to PageReferenced() for us.
2295f3f0e1d2SKirill A. Shutemov 		 * And rmap walk here is just too costly...
2296f3f0e1d2SKirill A. Shutemov 		 */
2297f3f0e1d2SKirill A. Shutemov 
2298f3f0e1d2SKirill A. Shutemov 		present++;
2299f3f0e1d2SKirill A. Shutemov 
2300f3f0e1d2SKirill A. Shutemov 		if (need_resched()) {
230185b392dbSMatthew Wilcox 			xas_pause(&xas);
2302f3f0e1d2SKirill A. Shutemov 			cond_resched_rcu();
2303f3f0e1d2SKirill A. Shutemov 		}
2304f3f0e1d2SKirill A. Shutemov 	}
2305f3f0e1d2SKirill A. Shutemov 	rcu_read_unlock();
2306f3f0e1d2SKirill A. Shutemov 
2307f3f0e1d2SKirill A. Shutemov 	if (result == SCAN_SUCCEED) {
2308d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
2309d8ea7cc8SZach O'Keefe 		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2310f3f0e1d2SKirill A. Shutemov 			result = SCAN_EXCEED_NONE_PTE;
2311e9ea874aSYang Yang 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2312f3f0e1d2SKirill A. Shutemov 		} else {
231334488399SZach O'Keefe 			result = collapse_file(mm, addr, file, start, cc);
2314f3f0e1d2SKirill A. Shutemov 		}
2315f3f0e1d2SKirill A. Shutemov 	}
2316f3f0e1d2SKirill A. Shutemov 
2317045634ffSGautam Menghani 	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
231850ad2f24SZach O'Keefe 	return result;
2319f3f0e1d2SKirill A. Shutemov }
2320f3f0e1d2SKirill A. Shutemov #else
232134488399SZach O'Keefe static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
232234488399SZach O'Keefe 				    struct file *file, pgoff_t start,
232334488399SZach O'Keefe 				    struct collapse_control *cc)
2324f3f0e1d2SKirill A. Shutemov {
2325f3f0e1d2SKirill A. Shutemov 	BUILD_BUG();
2326f3f0e1d2SKirill A. Shutemov }
2327f3f0e1d2SKirill A. Shutemov #endif
2328f3f0e1d2SKirill A. Shutemov 
232950ad2f24SZach O'Keefe static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
233034d6b470SZach O'Keefe 					    struct collapse_control *cc)
2331b46e756fSKirill A. Shutemov 	__releases(&khugepaged_mm_lock)
2332b46e756fSKirill A. Shutemov 	__acquires(&khugepaged_mm_lock)
2333b46e756fSKirill A. Shutemov {
233468540502SMatthew Wilcox (Oracle) 	struct vma_iterator vmi;
2335b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
2336b26e2701SQi Zheng 	struct mm_slot *slot;
2337b46e756fSKirill A. Shutemov 	struct mm_struct *mm;
2338b46e756fSKirill A. Shutemov 	struct vm_area_struct *vma;
2339b46e756fSKirill A. Shutemov 	int progress = 0;
2340b46e756fSKirill A. Shutemov 
2341b46e756fSKirill A. Shutemov 	VM_BUG_ON(!pages);
234235f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
234350ad2f24SZach O'Keefe 	*result = SCAN_FAIL;
2344b46e756fSKirill A. Shutemov 
2345b26e2701SQi Zheng 	if (khugepaged_scan.mm_slot) {
2346b46e756fSKirill A. Shutemov 		mm_slot = khugepaged_scan.mm_slot;
2347b26e2701SQi Zheng 		slot = &mm_slot->slot;
2348b26e2701SQi Zheng 	} else {
2349b26e2701SQi Zheng 		slot = list_entry(khugepaged_scan.mm_head.next,
2350b46e756fSKirill A. Shutemov 				     struct mm_slot, mm_node);
2351b26e2701SQi Zheng 		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2352b46e756fSKirill A. Shutemov 		khugepaged_scan.address = 0;
2353b46e756fSKirill A. Shutemov 		khugepaged_scan.mm_slot = mm_slot;
2354b46e756fSKirill A. Shutemov 	}
2355b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
2356b46e756fSKirill A. Shutemov 
2357b26e2701SQi Zheng 	mm = slot->mm;
23583b454ad3SYang Shi 	/*
23593b454ad3SYang Shi 	 * Don't wait for semaphore (to avoid long wait times).  Just move to
23603b454ad3SYang Shi 	 * the next mm on the list.
23613b454ad3SYang Shi 	 */
2362b46e756fSKirill A. Shutemov 	vma = NULL;
2363d8ed45c5SMichel Lespinasse 	if (unlikely(!mmap_read_trylock(mm)))
2364c1e8d7c6SMichel Lespinasse 		goto breakouterloop_mmap_lock;
2365b46e756fSKirill A. Shutemov 
2366b46e756fSKirill A. Shutemov 	progress++;
2367879c6000SLance Yang 	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
236868540502SMatthew Wilcox (Oracle) 		goto breakouterloop;
236968540502SMatthew Wilcox (Oracle) 
237068540502SMatthew Wilcox (Oracle) 	vma_iter_init(&vmi, mm, khugepaged_scan.address);
237168540502SMatthew Wilcox (Oracle) 	for_each_vma(vmi, vma) {
2372b46e756fSKirill A. Shutemov 		unsigned long hstart, hend;
2373b46e756fSKirill A. Shutemov 
2374b46e756fSKirill A. Shutemov 		cond_resched();
2375879c6000SLance Yang 		if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
2376b46e756fSKirill A. Shutemov 			progress++;
2377b46e756fSKirill A. Shutemov 			break;
2378b46e756fSKirill A. Shutemov 		}
23793485b883SRyan Roberts 		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
23803485b883SRyan Roberts 					     true, PMD_ORDER)) {
2381b46e756fSKirill A. Shutemov skip:
2382b46e756fSKirill A. Shutemov 			progress++;
2383b46e756fSKirill A. Shutemov 			continue;
2384b46e756fSKirill A. Shutemov 		}
23854fa6893fSYang Shi 		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
23864fa6893fSYang Shi 		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2387b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address > hend)
2388b46e756fSKirill A. Shutemov 			goto skip;
2389b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address < hstart)
2390b46e756fSKirill A. Shutemov 			khugepaged_scan.address = hstart;
2391b46e756fSKirill A. Shutemov 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2392b46e756fSKirill A. Shutemov 
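		/*
		 * Walk the VMA one PMD-sized, PMD-aligned step at a time.
		 * File-backed ranges are scanned with the mmap_lock dropped
		 * (hpage_collapse_scan_file), anonymous ranges with it held
		 * (hpage_collapse_scan_pmd); either scan may end up releasing
		 * the lock, which terminates this slot's walk below.
		 */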
2393b46e756fSKirill A. Shutemov 		while (khugepaged_scan.address < hend) {
239450ad2f24SZach O'Keefe 			bool mmap_locked = true;
239550ad2f24SZach O'Keefe 
2396b46e756fSKirill A. Shutemov 			cond_resched();
2397879c6000SLance Yang 			if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2398b46e756fSKirill A. Shutemov 				goto breakouterloop;
2399b46e756fSKirill A. Shutemov 
2400b46e756fSKirill A. Shutemov 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2401b46e756fSKirill A. Shutemov 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2402b46e756fSKirill A. Shutemov 				  hend);
240399cb0dbdSSong Liu 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2404396bcc52SMatthew Wilcox (Oracle) 				struct file *file = get_file(vma->vm_file);
2405f3f0e1d2SKirill A. Shutemov 				pgoff_t pgoff = linear_page_index(vma,
2406f3f0e1d2SKirill A. Shutemov 						khugepaged_scan.address);
240799cb0dbdSSong Liu 
2408d8ed45c5SMichel Lespinasse 				mmap_read_unlock(mm);
240950ad2f24SZach O'Keefe 				mmap_locked = false;
2410d50791c2SHugh Dickins 				*result = hpage_collapse_scan_file(mm,
2411d50791c2SHugh Dickins 					khugepaged_scan.address, file, pgoff, cc);
2412f3f0e1d2SKirill A. Shutemov 				fput(file);
2413d50791c2SHugh Dickins 				if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
2414d50791c2SHugh Dickins 					mmap_read_lock(mm);
2415879c6000SLance Yang 					if (hpage_collapse_test_exit_or_disable(mm))
2416d50791c2SHugh Dickins 						goto breakouterloop;
2417d50791c2SHugh Dickins 					*result = collapse_pte_mapped_thp(mm,
2418d50791c2SHugh Dickins 						khugepaged_scan.address, false);
2419d50791c2SHugh Dickins 					if (*result == SCAN_PMD_MAPPED)
2420d50791c2SHugh Dickins 						*result = SCAN_SUCCEED;
2421d50791c2SHugh Dickins 					mmap_read_unlock(mm);
2422d50791c2SHugh Dickins 				}
2423f3f0e1d2SKirill A. Shutemov 			} else {
24247d2c4385SZach O'Keefe 				*result = hpage_collapse_scan_pmd(mm, vma,
2425d50791c2SHugh Dickins 					khugepaged_scan.address, &mmap_locked, cc);
2426f3f0e1d2SKirill A. Shutemov 			}
242758ac9a89SZach O'Keefe 
2428d50791c2SHugh Dickins 			if (*result == SCAN_SUCCEED)
242950ad2f24SZach O'Keefe 				++khugepaged_pages_collapsed;
243058ac9a89SZach O'Keefe 
2431b46e756fSKirill A. Shutemov 			/* move to next address */
2432b46e756fSKirill A. Shutemov 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2433b46e756fSKirill A. Shutemov 			progress += HPAGE_PMD_NR;
243450ad2f24SZach O'Keefe 			if (!mmap_locked)
243550ad2f24SZach O'Keefe 				/*
243650ad2f24SZach O'Keefe 				 * We released mmap_lock so break loop.  Note
243750ad2f24SZach O'Keefe 				 * that we drop mmap_lock before all hugepage
243850ad2f24SZach O'Keefe 				 * allocations, so if allocation fails, we are
243950ad2f24SZach O'Keefe 				 * guaranteed to break here and report the
244050ad2f24SZach O'Keefe 				 * correct result back to caller.
244150ad2f24SZach O'Keefe 				 */
2442c1e8d7c6SMichel Lespinasse 				goto breakouterloop_mmap_lock;
2443b46e756fSKirill A. Shutemov 			if (progress >= pages)
2444b46e756fSKirill A. Shutemov 				goto breakouterloop;
2445b46e756fSKirill A. Shutemov 		}
2446b46e756fSKirill A. Shutemov 	}
2447b46e756fSKirill A. Shutemov breakouterloop:
2448d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2449c1e8d7c6SMichel Lespinasse breakouterloop_mmap_lock:
2450b46e756fSKirill A. Shutemov 
2451b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2452b46e756fSKirill A. Shutemov 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2453b46e756fSKirill A. Shutemov 	/*
2454b46e756fSKirill A. Shutemov 	 * Release the current mm_slot if this mm is about to die, or
2455b46e756fSKirill A. Shutemov 	 * if we scanned all vmas of this mm.
2456b46e756fSKirill A. Shutemov 	 */
24575dad6048SLance Yang 	if (hpage_collapse_test_exit(mm) || !vma) {
2458b46e756fSKirill A. Shutemov 		/*
2459b46e756fSKirill A. Shutemov 		 * Make sure that if mm_users is reaching zero while
2460b46e756fSKirill A. Shutemov 		 * khugepaged runs here, khugepaged_exit will find
2461b46e756fSKirill A. Shutemov 		 * mm_slot not pointing to the exiting mm.
2462b46e756fSKirill A. Shutemov 		 */
2463b26e2701SQi Zheng 		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2464b26e2701SQi Zheng 			slot = list_entry(slot->mm_node.next,
2465b46e756fSKirill A. Shutemov 					  struct mm_slot, mm_node);
2466b26e2701SQi Zheng 			khugepaged_scan.mm_slot =
2467b26e2701SQi Zheng 				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2468b46e756fSKirill A. Shutemov 			khugepaged_scan.address = 0;
2469b46e756fSKirill A. Shutemov 		} else {
2470b46e756fSKirill A. Shutemov 			khugepaged_scan.mm_slot = NULL;
2471b46e756fSKirill A. Shutemov 			khugepaged_full_scans++;
2472b46e756fSKirill A. Shutemov 		}
2473b46e756fSKirill A. Shutemov 
2474b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2475b46e756fSKirill A. Shutemov 	}
2476b46e756fSKirill A. Shutemov 
2477b46e756fSKirill A. Shutemov 	return progress;
2478b46e756fSKirill A. Shutemov }
2479b46e756fSKirill A. Shutemov 
2480b46e756fSKirill A. Shutemov static int khugepaged_has_work(void)
2481b46e756fSKirill A. Shutemov {
2482b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) &&
24831064026bSYang Shi 		hugepage_flags_enabled();
2484b46e756fSKirill A. Shutemov }
2485b46e756fSKirill A. Shutemov 
2486b46e756fSKirill A. Shutemov static int khugepaged_wait_event(void)
2487b46e756fSKirill A. Shutemov {
2488b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) ||
2489b46e756fSKirill A. Shutemov 		kthread_should_stop();
2490b46e756fSKirill A. Shutemov }
2491b46e756fSKirill A. Shutemov 
249234d6b470SZach O'Keefe static void khugepaged_do_scan(struct collapse_control *cc)
2493b46e756fSKirill A. Shutemov {
2494b46e756fSKirill A. Shutemov 	unsigned int progress = 0, pass_through_head = 0;
249589dc6a96SYanfei Xu 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2496b46e756fSKirill A. Shutemov 	bool wait = true;
249750ad2f24SZach O'Keefe 	int result = SCAN_SUCCEED;
2498b46e756fSKirill A. Shutemov 
2499a980df33SKirill A. Shutemov 	lru_add_drain_all();
2500a980df33SKirill A. Shutemov 
2501c6a7f445SYang Shi 	while (true) {
2502b46e756fSKirill A. Shutemov 		cond_resched();
2503b46e756fSKirill A. Shutemov 
2504b39ca208SKevin Hao 		if (unlikely(kthread_should_stop()))
2505b46e756fSKirill A. Shutemov 			break;
2506b46e756fSKirill A. Shutemov 
2507b46e756fSKirill A. Shutemov 		spin_lock(&khugepaged_mm_lock);
2508b46e756fSKirill A. Shutemov 		if (!khugepaged_scan.mm_slot)
2509b46e756fSKirill A. Shutemov 			pass_through_head++;
2510b46e756fSKirill A. Shutemov 		if (khugepaged_has_work() &&
2511b46e756fSKirill A. Shutemov 		    pass_through_head < 2)
2512b46e756fSKirill A. Shutemov 			progress += khugepaged_scan_mm_slot(pages - progress,
251350ad2f24SZach O'Keefe 							    &result, cc);
2514b46e756fSKirill A. Shutemov 		else
2515b46e756fSKirill A. Shutemov 			progress = pages;
2516b46e756fSKirill A. Shutemov 		spin_unlock(&khugepaged_mm_lock);
2517b46e756fSKirill A. Shutemov 
2518c6a7f445SYang Shi 		if (progress >= pages)
2519c6a7f445SYang Shi 			break;
2520c6a7f445SYang Shi 
252150ad2f24SZach O'Keefe 		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2522c6a7f445SYang Shi 			/*
2523c6a7f445SYang Shi 			 * If we fail to allocate the first time, try to sleep for
2524c6a7f445SYang Shi 			 * a while.  When the failure hits again, cancel the scan.
2525c6a7f445SYang Shi 			 */
2526c6a7f445SYang Shi 			if (!wait)
2527c6a7f445SYang Shi 				break;
2528c6a7f445SYang Shi 			wait = false;
2529c6a7f445SYang Shi 			khugepaged_alloc_sleep();
2530c6a7f445SYang Shi 		}
2531c6a7f445SYang Shi 	}
2532b46e756fSKirill A. Shutemov }
2533b46e756fSKirill A. Shutemov 
2534b46e756fSKirill A. Shutemov static bool khugepaged_should_wakeup(void)
2535b46e756fSKirill A. Shutemov {
2536b46e756fSKirill A. Shutemov 	return kthread_should_stop() ||
2537b46e756fSKirill A. Shutemov 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2538b46e756fSKirill A. Shutemov }
2539b46e756fSKirill A. Shutemov 
2540b46e756fSKirill A. Shutemov static void khugepaged_wait_work(void)
2541b46e756fSKirill A. Shutemov {
2542b46e756fSKirill A. Shutemov 	if (khugepaged_has_work()) {
2543b46e756fSKirill A. Shutemov 		const unsigned long scan_sleep_jiffies =
2544b46e756fSKirill A. Shutemov 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2545b46e756fSKirill A. Shutemov 
2546b46e756fSKirill A. Shutemov 		if (!scan_sleep_jiffies)
2547b46e756fSKirill A. Shutemov 			return;
2548b46e756fSKirill A. Shutemov 
2549b46e756fSKirill A. Shutemov 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2550b46e756fSKirill A. Shutemov 		wait_event_freezable_timeout(khugepaged_wait,
2551b46e756fSKirill A. Shutemov 					     khugepaged_should_wakeup(),
2552b46e756fSKirill A. Shutemov 					     scan_sleep_jiffies);
2553b46e756fSKirill A. Shutemov 		return;
2554b46e756fSKirill A. Shutemov 	}
2555b46e756fSKirill A. Shutemov 
25561064026bSYang Shi 	if (hugepage_flags_enabled())
2557b46e756fSKirill A. Shutemov 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2558b46e756fSKirill A. Shutemov }
2559b46e756fSKirill A. Shutemov 
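/*
 * Main loop of the khugepaged kernel thread: run as a freezable,
 * lowest-priority task, alternately scanning mm slots (khugepaged_do_scan)
 * and sleeping (khugepaged_wait_work) until the thread is asked to stop,
 * at which point the current mm_slot, if any, is released.
 */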
2560b46e756fSKirill A. Shutemov static int khugepaged(void *none)
2561b46e756fSKirill A. Shutemov {
2562b26e2701SQi Zheng 	struct khugepaged_mm_slot *mm_slot;
2563b46e756fSKirill A. Shutemov 
2564b46e756fSKirill A. Shutemov 	set_freezable();
2565b46e756fSKirill A. Shutemov 	set_user_nice(current, MAX_NICE);
2566b46e756fSKirill A. Shutemov 
2567b46e756fSKirill A. Shutemov 	while (!kthread_should_stop()) {
256834d6b470SZach O'Keefe 		khugepaged_do_scan(&khugepaged_collapse_control);
2569b46e756fSKirill A. Shutemov 		khugepaged_wait_work();
2570b46e756fSKirill A. Shutemov 	}
2571b46e756fSKirill A. Shutemov 
2572b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2573b46e756fSKirill A. Shutemov 	mm_slot = khugepaged_scan.mm_slot;
2574b46e756fSKirill A. Shutemov 	khugepaged_scan.mm_slot = NULL;
2575b46e756fSKirill A. Shutemov 	if (mm_slot)
2576b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2577b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
2578b46e756fSKirill A. Shutemov 	return 0;
2579b46e756fSKirill A. Shutemov }
2580b46e756fSKirill A. Shutemov 
2581b46e756fSKirill A. Shutemov static void set_recommended_min_free_kbytes(void)
2582b46e756fSKirill A. Shutemov {
2583b46e756fSKirill A. Shutemov 	struct zone *zone;
2584b46e756fSKirill A. Shutemov 	int nr_zones = 0;
2585b46e756fSKirill A. Shutemov 	unsigned long recommended_min;
2586b46e756fSKirill A. Shutemov 
25871064026bSYang Shi 	if (!hugepage_flags_enabled()) {
2588bd3400eaSLiangcai Fan 		calculate_min_free_kbytes();
2589bd3400eaSLiangcai Fan 		goto update_wmarks;
2590bd3400eaSLiangcai Fan 	}
2591bd3400eaSLiangcai Fan 
2592b7d349c7SJoonsoo Kim 	for_each_populated_zone(zone) {
2593b7d349c7SJoonsoo Kim 		/*
2594b7d349c7SJoonsoo Kim 		 * We don't need to worry about fragmentation of
2595b7d349c7SJoonsoo Kim 		 * ZONE_MOVABLE since it only has movable pages.
2596b7d349c7SJoonsoo Kim 		 */
2597b7d349c7SJoonsoo Kim 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2598b7d349c7SJoonsoo Kim 			continue;
2599b7d349c7SJoonsoo Kim 
2600b46e756fSKirill A. Shutemov 		nr_zones++;
2601b7d349c7SJoonsoo Kim 	}
2602b46e756fSKirill A. Shutemov 
2603b46e756fSKirill A. Shutemov 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2604b46e756fSKirill A. Shutemov 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2605b46e756fSKirill A. Shutemov 
2606b46e756fSKirill A. Shutemov 	/*
2607b46e756fSKirill A. Shutemov 	 * Make sure that on average at least two pageblocks are almost free
2608b46e756fSKirill A. Shutemov 	 * of another type, one for a migratetype to fall back to and a
2609b46e756fSKirill A. Shutemov 	 * second to avoid subsequent fallbacks of other types. There are 3
2610b46e756fSKirill A. Shutemov 	 * MIGRATE_TYPES we care about.
2611b46e756fSKirill A. Shutemov 	 */
2612b46e756fSKirill A. Shutemov 	recommended_min += pageblock_nr_pages * nr_zones *
2613b46e756fSKirill A. Shutemov 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2614b46e756fSKirill A. Shutemov 
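	/*
	 * Worked example, assuming a typical x86_64 configuration with 4KiB
	 * pages, 2MiB pageblocks (512 pages) and MIGRATE_PCPTYPES == 3: the
	 * two terms above amount to (2 + 3 * 3) * 512 = 5632 pages, roughly
	 * 22MiB, per populated zone, before the 5%-of-lowmem cap and the
	 * conversion to kilobytes below.
	 */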
2615b46e756fSKirill A. Shutemov 	/* never allow reserving more than 5% of lowmem */
2616b46e756fSKirill A. Shutemov 	recommended_min = min(recommended_min,
2617b46e756fSKirill A. Shutemov 			      (unsigned long) nr_free_buffer_pages() / 20);
2618b46e756fSKirill A. Shutemov 	recommended_min <<= (PAGE_SHIFT-10);
2619b46e756fSKirill A. Shutemov 
2620b46e756fSKirill A. Shutemov 	if (recommended_min > min_free_kbytes) {
2621b46e756fSKirill A. Shutemov 		if (user_min_free_kbytes >= 0)
2622b46e756fSKirill A. Shutemov 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2623b46e756fSKirill A. Shutemov 				min_free_kbytes, recommended_min);
2624b46e756fSKirill A. Shutemov 
2625b46e756fSKirill A. Shutemov 		min_free_kbytes = recommended_min;
2626b46e756fSKirill A. Shutemov 	}
2627bd3400eaSLiangcai Fan 
2628bd3400eaSLiangcai Fan update_wmarks:
2629b46e756fSKirill A. Shutemov 	setup_per_zone_wmarks();
2630b46e756fSKirill A. Shutemov }
2631b46e756fSKirill A. Shutemov 
2632b46e756fSKirill A. Shutemov int start_stop_khugepaged(void)
2633b46e756fSKirill A. Shutemov {
2634b46e756fSKirill A. Shutemov 	int err = 0;
2635b46e756fSKirill A. Shutemov 
2636b46e756fSKirill A. Shutemov 	mutex_lock(&khugepaged_mutex);
26371064026bSYang Shi 	if (hugepage_flags_enabled()) {
2638b46e756fSKirill A. Shutemov 		if (!khugepaged_thread)
2639b46e756fSKirill A. Shutemov 			khugepaged_thread = kthread_run(khugepaged, NULL,
2640b46e756fSKirill A. Shutemov 							"khugepaged");
2641b46e756fSKirill A. Shutemov 		if (IS_ERR(khugepaged_thread)) {
2642b46e756fSKirill A. Shutemov 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2643b46e756fSKirill A. Shutemov 			err = PTR_ERR(khugepaged_thread);
2644b46e756fSKirill A. Shutemov 			khugepaged_thread = NULL;
2645b46e756fSKirill A. Shutemov 			goto fail;
2646b46e756fSKirill A. Shutemov 		}
2647b46e756fSKirill A. Shutemov 
2648b46e756fSKirill A. Shutemov 		if (!list_empty(&khugepaged_scan.mm_head))
2649b46e756fSKirill A. Shutemov 			wake_up_interruptible(&khugepaged_wait);
2650b46e756fSKirill A. Shutemov 	} else if (khugepaged_thread) {
2651b46e756fSKirill A. Shutemov 		kthread_stop(khugepaged_thread);
2652b46e756fSKirill A. Shutemov 		khugepaged_thread = NULL;
2653b46e756fSKirill A. Shutemov 	}
2654bd3400eaSLiangcai Fan 	set_recommended_min_free_kbytes();
2655b46e756fSKirill A. Shutemov fail:
2656b46e756fSKirill A. Shutemov 	mutex_unlock(&khugepaged_mutex);
2657b46e756fSKirill A. Shutemov 	return err;
2658b46e756fSKirill A. Shutemov }
26594aab2be0SVijay Balakrishna 
26604aab2be0SVijay Balakrishna void khugepaged_min_free_kbytes_update(void)
26614aab2be0SVijay Balakrishna {
26624aab2be0SVijay Balakrishna 	mutex_lock(&khugepaged_mutex);
26631064026bSYang Shi 	if (hugepage_flags_enabled() && khugepaged_thread)
26644aab2be0SVijay Balakrishna 		set_recommended_min_free_kbytes();
26654aab2be0SVijay Balakrishna 	mutex_unlock(&khugepaged_mutex);
26664aab2be0SVijay Balakrishna }
26677d8faaf1SZach O'Keefe 
266857e9cc50SJohannes Weiner bool current_is_khugepaged(void)
266957e9cc50SJohannes Weiner {
267057e9cc50SJohannes Weiner 	return kthread_func(current) == khugepaged;
267157e9cc50SJohannes Weiner }
267257e9cc50SJohannes Weiner 
26737d8faaf1SZach O'Keefe static int madvise_collapse_errno(enum scan_result r)
26747d8faaf1SZach O'Keefe {
26757d8faaf1SZach O'Keefe 	/*
26767d8faaf1SZach O'Keefe 	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
26777d8faaf1SZach O'Keefe 	 * actionable feedback to the caller, so they may take an appropriate
26787d8faaf1SZach O'Keefe 	 * fallback measure depending on the nature of the failure.
26797d8faaf1SZach O'Keefe 	 */
26807d8faaf1SZach O'Keefe 	switch (r) {
26817d8faaf1SZach O'Keefe 	case SCAN_ALLOC_HUGE_PAGE_FAIL:
26827d8faaf1SZach O'Keefe 		return -ENOMEM;
26837d8faaf1SZach O'Keefe 	case SCAN_CGROUP_CHARGE_FAIL:
2684ac492b9cSDavid Stevens 	case SCAN_EXCEED_NONE_PTE:
26857d8faaf1SZach O'Keefe 		return -EBUSY;
26867d8faaf1SZach O'Keefe 	/* Resource temporarily unavailable - trying again might succeed */
2687ae63c898SZach O'Keefe 	case SCAN_PAGE_COUNT:
26887d8faaf1SZach O'Keefe 	case SCAN_PAGE_LOCK:
26897d8faaf1SZach O'Keefe 	case SCAN_PAGE_LRU:
26900f3e2a2cSZach O'Keefe 	case SCAN_DEL_PAGE_LRU:
2691ac492b9cSDavid Stevens 	case SCAN_PAGE_FILLED:
26927d8faaf1SZach O'Keefe 		return -EAGAIN;
26937d8faaf1SZach O'Keefe 	/*
26947d8faaf1SZach O'Keefe 	 * Other: Trying again likely not to succeed / error intrinsic to
26957d8faaf1SZach O'Keefe 	 * specified memory range. khugepaged likely won't be able to collapse
26967d8faaf1SZach O'Keefe 	 * either.
26977d8faaf1SZach O'Keefe 	 */
26987d8faaf1SZach O'Keefe 	default:
26997d8faaf1SZach O'Keefe 		return -EINVAL;
27007d8faaf1SZach O'Keefe 	}
27017d8faaf1SZach O'Keefe }
27027d8faaf1SZach O'Keefe 
27037d8faaf1SZach O'Keefe int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
27047d8faaf1SZach O'Keefe 		     unsigned long start, unsigned long end)
27057d8faaf1SZach O'Keefe {
27067d8faaf1SZach O'Keefe 	struct collapse_control *cc;
27077d8faaf1SZach O'Keefe 	struct mm_struct *mm = vma->vm_mm;
27087d8faaf1SZach O'Keefe 	unsigned long hstart, hend, addr;
27097d8faaf1SZach O'Keefe 	int thps = 0, last_fail = SCAN_FAIL;
27107d8faaf1SZach O'Keefe 	bool mmap_locked = true;
27117d8faaf1SZach O'Keefe 
27127d8faaf1SZach O'Keefe 	BUG_ON(vma->vm_start > start);
27137d8faaf1SZach O'Keefe 	BUG_ON(vma->vm_end < end);
27147d8faaf1SZach O'Keefe 
27157d8faaf1SZach O'Keefe 	*prev = vma;
27167d8faaf1SZach O'Keefe 
27173485b883SRyan Roberts 	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
27183485b883SRyan Roberts 				     PMD_ORDER))
27197d8faaf1SZach O'Keefe 		return -EINVAL;
27207d8faaf1SZach O'Keefe 
27217d8faaf1SZach O'Keefe 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
27227d8faaf1SZach O'Keefe 	if (!cc)
27237d8faaf1SZach O'Keefe 		return -ENOMEM;
27247d8faaf1SZach O'Keefe 	cc->is_khugepaged = false;
27257d8faaf1SZach O'Keefe 
27267d8faaf1SZach O'Keefe 	mmgrab(mm);
27277d8faaf1SZach O'Keefe 	lru_add_drain_all();
27287d8faaf1SZach O'Keefe 
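	/*
	 * Round start up and end down to PMD boundaries: only PMD-sized,
	 * PMD-aligned chunks fully contained in [start, end) are candidates
	 * for collapse.
	 */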
27297d8faaf1SZach O'Keefe 	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
27307d8faaf1SZach O'Keefe 	hend = end & HPAGE_PMD_MASK;
27317d8faaf1SZach O'Keefe 
27327d8faaf1SZach O'Keefe 	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
27337d8faaf1SZach O'Keefe 		int result = SCAN_FAIL;
27347d8faaf1SZach O'Keefe 
27357d8faaf1SZach O'Keefe 		if (!mmap_locked) {
27367d8faaf1SZach O'Keefe 			cond_resched();
27377d8faaf1SZach O'Keefe 			mmap_read_lock(mm);
27387d8faaf1SZach O'Keefe 			mmap_locked = true;
273934488399SZach O'Keefe 			result = hugepage_vma_revalidate(mm, addr, false, &vma,
274034488399SZach O'Keefe 							 cc);
27417d8faaf1SZach O'Keefe 			if (result  != SCAN_SUCCEED) {
27427d8faaf1SZach O'Keefe 				last_fail = result;
27437d8faaf1SZach O'Keefe 				goto out_nolock;
27447d8faaf1SZach O'Keefe 			}
27454d24de94SYang Shi 
274652dc0310SZach O'Keefe 			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
27477d8faaf1SZach O'Keefe 		}
27487d8faaf1SZach O'Keefe 		mmap_assert_locked(mm);
27497d8faaf1SZach O'Keefe 		memset(cc->node_load, 0, sizeof(cc->node_load));
2750e031ff96SYang Shi 		nodes_clear(cc->alloc_nmask);
275134488399SZach O'Keefe 		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
275234488399SZach O'Keefe 			struct file *file = get_file(vma->vm_file);
275334488399SZach O'Keefe 			pgoff_t pgoff = linear_page_index(vma, addr);
275434488399SZach O'Keefe 
275534488399SZach O'Keefe 			mmap_read_unlock(mm);
275634488399SZach O'Keefe 			mmap_locked = false;
275734488399SZach O'Keefe 			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
27587d2c4385SZach O'Keefe 							  cc);
275934488399SZach O'Keefe 			fput(file);
276034488399SZach O'Keefe 		} else {
276134488399SZach O'Keefe 			result = hpage_collapse_scan_pmd(mm, vma, addr,
276234488399SZach O'Keefe 							 &mmap_locked, cc);
276334488399SZach O'Keefe 		}
27647d8faaf1SZach O'Keefe 		if (!mmap_locked)
27657d8faaf1SZach O'Keefe 			*prev = NULL;  /* Tell caller we dropped mmap_lock */
27667d8faaf1SZach O'Keefe 
276734488399SZach O'Keefe handle_result:
27687d8faaf1SZach O'Keefe 		switch (result) {
27697d8faaf1SZach O'Keefe 		case SCAN_SUCCEED:
27707d8faaf1SZach O'Keefe 		case SCAN_PMD_MAPPED:
27717d8faaf1SZach O'Keefe 			++thps;
27727d8faaf1SZach O'Keefe 			break;
277334488399SZach O'Keefe 		case SCAN_PTE_MAPPED_HUGEPAGE:
277434488399SZach O'Keefe 			BUG_ON(mmap_locked);
277534488399SZach O'Keefe 			BUG_ON(*prev);
27761043173eSHugh Dickins 			mmap_read_lock(mm);
277734488399SZach O'Keefe 			result = collapse_pte_mapped_thp(mm, addr, true);
27781043173eSHugh Dickins 			mmap_read_unlock(mm);
277934488399SZach O'Keefe 			goto handle_result;
27807d8faaf1SZach O'Keefe 		/* Whitelisted set of results where continuing OK */
27817d8faaf1SZach O'Keefe 		case SCAN_PMD_NULL:
27827d8faaf1SZach O'Keefe 		case SCAN_PTE_NON_PRESENT:
27837d8faaf1SZach O'Keefe 		case SCAN_PTE_UFFD_WP:
27847d8faaf1SZach O'Keefe 		case SCAN_PAGE_RO:
27857d8faaf1SZach O'Keefe 		case SCAN_LACK_REFERENCED_PAGE:
27867d8faaf1SZach O'Keefe 		case SCAN_PAGE_NULL:
27877d8faaf1SZach O'Keefe 		case SCAN_PAGE_COUNT:
27887d8faaf1SZach O'Keefe 		case SCAN_PAGE_LOCK:
27897d8faaf1SZach O'Keefe 		case SCAN_PAGE_COMPOUND:
27907d8faaf1SZach O'Keefe 		case SCAN_PAGE_LRU:
27910f3e2a2cSZach O'Keefe 		case SCAN_DEL_PAGE_LRU:
27927d8faaf1SZach O'Keefe 			last_fail = result;
27937d8faaf1SZach O'Keefe 			break;
27947d8faaf1SZach O'Keefe 		default:
27957d8faaf1SZach O'Keefe 			last_fail = result;
27967d8faaf1SZach O'Keefe 			/* Other error, exit */
27977d8faaf1SZach O'Keefe 			goto out_maybelock;
27987d8faaf1SZach O'Keefe 		}
27997d8faaf1SZach O'Keefe 	}
28007d8faaf1SZach O'Keefe 
28017d8faaf1SZach O'Keefe out_maybelock:
28027d8faaf1SZach O'Keefe 	/* Caller expects us to hold mmap_lock on return */
28037d8faaf1SZach O'Keefe 	if (!mmap_locked)
28047d8faaf1SZach O'Keefe 		mmap_read_lock(mm);
28057d8faaf1SZach O'Keefe out_nolock:
28067d8faaf1SZach O'Keefe 	mmap_assert_locked(mm);
28077d8faaf1SZach O'Keefe 	mmdrop(mm);
28087d8faaf1SZach O'Keefe 	kfree(cc);
28097d8faaf1SZach O'Keefe 
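	/*
	 * Report success only if every PMD-sized chunk in the aligned range
	 * ended up backed by a huge page (freshly collapsed or already
	 * PMD-mapped); otherwise translate the last failure into an errno.
	 */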
28107d8faaf1SZach O'Keefe 	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
28117d8faaf1SZach O'Keefe 			: madvise_collapse_errno(last_fail);
28127d8faaf1SZach O'Keefe }
2813