xref: /linux/mm/khugepaged.c (revision 7d2c4385c3417cab8c08ac4c86a3852b9a851980)
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
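
/*
 * These status codes are reported through the huge_memory tracepoints
 * (e.g. trace_mm_collapse_huge_page_isolate() below), so a collapse
 * failure can be diagnosed from userspace. Illustrative only; the
 * tracefs mount point is assumed from the generic layout, not defined
 * in this file:
 *
 *	# echo 1 > /sys/kernel/tracing/events/huge_memory/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 */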

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 PTEs (or VMAs) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one PTE is mapped the way
 * it would have been mapped had the VMA been large enough during the
 * original page fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* Last target selected in hpage_collapse_find_target_node() */
	int last_target_node;
};

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: array of addresses of the pte-mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm_slot to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
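
/*
 * Worked example (assuming the common 4K base page size, where
 * HPAGE_PMD_NR == 512): khugepaged_init() below defaults max_ptes_none
 * to HPAGE_PMD_NR - 1 == 511, so a 2M range with a single present PTE
 * may be collapsed, growing RSS by up to 511 * 4K for that range. With
 * max_ptes_none == 0 only fully populated ranges are collapsed.
 */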
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
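
/*
 * The attribute group above appears under the transparent_hugepage sysfs
 * directory. Illustrative shell usage; the /sys/kernel/mm path is the
 * conventional location, assumed rather than defined in this file:
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *	# echo 8192 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 */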
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged,
		 * if it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
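
/*
 * Illustrative userspace sketch of how this path is reached (hypothetical
 * buffer; madvise(2) is the real entry point):
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, 8 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, 8 << 20, MADV_HUGEPAGE);	// sets VM_HUGEPAGE here
 */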

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

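/*
 * mm_users == 0 means all users of the address space are gone and its
 * page tables are about to be torn down. Note that khugepaged only holds
 * an mmgrab() reference (taken in __khugepaged_enter() below), which
 * keeps the mm_struct allocated but does not pin mm_users.
 */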
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Stall here (after we return,
		 * all page tables will be destroyed) until khugepaged has
		 * finished working on the page tables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}
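
/*
 * Worked example for is_refcount_suitable() (hypothetical numbers): a
 * base (order-0) anon page mapped into two processes and not in the swap
 * cache has total_mapcount == 2, so the expected refcount is 2. If
 * page_count() reads 3, something else (e.g. a GUP pin) holds an extra
 * reference and the page is rejected with SCAN_PAGE_COUNT below.
 */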
5579445689fSKirill A. Shutemov 
558b46e756fSKirill A. Shutemov static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
559b46e756fSKirill A. Shutemov 					unsigned long address,
5605503fbf2SKirill A. Shutemov 					pte_t *pte,
561d8ea7cc8SZach O'Keefe 					struct collapse_control *cc,
5625503fbf2SKirill A. Shutemov 					struct list_head *compound_pagelist)
563b46e756fSKirill A. Shutemov {
564b46e756fSKirill A. Shutemov 	struct page *page = NULL;
565b46e756fSKirill A. Shutemov 	pte_t *_pte;
56650ad2f24SZach O'Keefe 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
5670db501f7SEbru Akagunduz 	bool writable = false;
568b46e756fSKirill A. Shutemov 
569b46e756fSKirill A. Shutemov 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
570b46e756fSKirill A. Shutemov 	     _pte++, address += PAGE_SIZE) {
571b46e756fSKirill A. Shutemov 		pte_t pteval = *_pte;
572b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || (pte_present(pteval) &&
573b46e756fSKirill A. Shutemov 				is_zero_pfn(pte_pfn(pteval)))) {
574d8ea7cc8SZach O'Keefe 			++none_or_zero;
575b46e756fSKirill A. Shutemov 			if (!userfaultfd_armed(vma) &&
576d8ea7cc8SZach O'Keefe 			    (!cc->is_khugepaged ||
577d8ea7cc8SZach O'Keefe 			     none_or_zero <= khugepaged_max_ptes_none)) {
578b46e756fSKirill A. Shutemov 				continue;
579b46e756fSKirill A. Shutemov 			} else {
580b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_NONE_PTE;
581e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
582b46e756fSKirill A. Shutemov 				goto out;
583b46e756fSKirill A. Shutemov 			}
584b46e756fSKirill A. Shutemov 		}
585b46e756fSKirill A. Shutemov 		if (!pte_present(pteval)) {
586b46e756fSKirill A. Shutemov 			result = SCAN_PTE_NON_PRESENT;
587b46e756fSKirill A. Shutemov 			goto out;
588b46e756fSKirill A. Shutemov 		}
589b46e756fSKirill A. Shutemov 		page = vm_normal_page(vma, address, pteval);
5903218f871SAlex Sierra 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
591b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_NULL;
592b46e756fSKirill A. Shutemov 			goto out;
593b46e756fSKirill A. Shutemov 		}
594b46e756fSKirill A. Shutemov 
595b46e756fSKirill A. Shutemov 		VM_BUG_ON_PAGE(!PageAnon(page), page);
596b46e756fSKirill A. Shutemov 
597d8ea7cc8SZach O'Keefe 		if (page_mapcount(page) > 1) {
598d8ea7cc8SZach O'Keefe 			++shared;
599d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
600d8ea7cc8SZach O'Keefe 			    shared > khugepaged_max_ptes_shared) {
60171a2c112SKirill A. Shutemov 				result = SCAN_EXCEED_SHARED_PTE;
602e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
60371a2c112SKirill A. Shutemov 				goto out;
60471a2c112SKirill A. Shutemov 			}
605d8ea7cc8SZach O'Keefe 		}
60671a2c112SKirill A. Shutemov 
6075503fbf2SKirill A. Shutemov 		if (PageCompound(page)) {
6085503fbf2SKirill A. Shutemov 			struct page *p;
6095503fbf2SKirill A. Shutemov 			page = compound_head(page);
6105503fbf2SKirill A. Shutemov 
6115503fbf2SKirill A. Shutemov 			/*
6125503fbf2SKirill A. Shutemov 			 * Check if we have dealt with the compound page
6135503fbf2SKirill A. Shutemov 			 * already
6145503fbf2SKirill A. Shutemov 			 */
6155503fbf2SKirill A. Shutemov 			list_for_each_entry(p, compound_pagelist, lru) {
6165503fbf2SKirill A. Shutemov 				if (page == p)
6175503fbf2SKirill A. Shutemov 					goto next;
6185503fbf2SKirill A. Shutemov 			}
6195503fbf2SKirill A. Shutemov 		}
6205503fbf2SKirill A. Shutemov 
621b46e756fSKirill A. Shutemov 		/*
622b46e756fSKirill A. Shutemov 		 * We can do it before isolate_lru_page because the
623b46e756fSKirill A. Shutemov 		 * page can't be freed from under us. NOTE: PG_lock
624b46e756fSKirill A. Shutemov 		 * is needed to serialize against split_huge_page
625b46e756fSKirill A. Shutemov 		 * when invoked from the VM.
626b46e756fSKirill A. Shutemov 		 */
627b46e756fSKirill A. Shutemov 		if (!trylock_page(page)) {
628b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LOCK;
629b46e756fSKirill A. Shutemov 			goto out;
630b46e756fSKirill A. Shutemov 		}
631b46e756fSKirill A. Shutemov 
632b46e756fSKirill A. Shutemov 		/*
6339445689fSKirill A. Shutemov 		 * Check if the page has any GUP (or other external) pins.
6349445689fSKirill A. Shutemov 		 *
6359445689fSKirill A. Shutemov 		 * The page table that maps the page has been already unlinked
6369445689fSKirill A. Shutemov 		 * from the page table tree and this process cannot get
637f0953a1bSIngo Molnar 		 * an additional pin on the page.
6389445689fSKirill A. Shutemov 		 *
6399445689fSKirill A. Shutemov 		 * New pins can come later if the page is shared across fork,
6409445689fSKirill A. Shutemov 		 * but not from this process. The other process cannot write to
6419445689fSKirill A. Shutemov 		 * the page, only trigger CoW.
642b46e756fSKirill A. Shutemov 		 */
6439445689fSKirill A. Shutemov 		if (!is_refcount_suitable(page)) {
644b46e756fSKirill A. Shutemov 			unlock_page(page);
645b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
646b46e756fSKirill A. Shutemov 			goto out;
647b46e756fSKirill A. Shutemov 		}
648b46e756fSKirill A. Shutemov 
649b46e756fSKirill A. Shutemov 		/*
650b46e756fSKirill A. Shutemov 		 * Isolate the page to avoid collapsing an hugepage
651b46e756fSKirill A. Shutemov 		 * currently in use by the VM.
652b46e756fSKirill A. Shutemov 		 */
653b46e756fSKirill A. Shutemov 		if (isolate_lru_page(page)) {
654b46e756fSKirill A. Shutemov 			unlock_page(page);
655b46e756fSKirill A. Shutemov 			result = SCAN_DEL_PAGE_LRU;
656b46e756fSKirill A. Shutemov 			goto out;
657b46e756fSKirill A. Shutemov 		}
6585503fbf2SKirill A. Shutemov 		mod_node_page_state(page_pgdat(page),
6595503fbf2SKirill A. Shutemov 				NR_ISOLATED_ANON + page_is_file_lru(page),
6605503fbf2SKirill A. Shutemov 				compound_nr(page));
661b46e756fSKirill A. Shutemov 		VM_BUG_ON_PAGE(!PageLocked(page), page);
662b46e756fSKirill A. Shutemov 		VM_BUG_ON_PAGE(PageLRU(page), page);
663b46e756fSKirill A. Shutemov 
6645503fbf2SKirill A. Shutemov 		if (PageCompound(page))
6655503fbf2SKirill A. Shutemov 			list_add_tail(&page->lru, compound_pagelist);
6665503fbf2SKirill A. Shutemov next:
667d8ea7cc8SZach O'Keefe 		/*
668d8ea7cc8SZach O'Keefe 		 * If collapse was initiated by khugepaged, check that there is
669d8ea7cc8SZach O'Keefe 		 * enough young pte to justify collapsing the page
670d8ea7cc8SZach O'Keefe 		 */
671d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
672d8ea7cc8SZach O'Keefe 		    (pte_young(pteval) || page_is_young(page) ||
673d8ea7cc8SZach O'Keefe 		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
674d8ea7cc8SZach O'Keefe 								     address)))
6750db501f7SEbru Akagunduz 			referenced++;
6765503fbf2SKirill A. Shutemov 
6775503fbf2SKirill A. Shutemov 		if (pte_write(pteval))
6785503fbf2SKirill A. Shutemov 			writable = true;
679b46e756fSKirill A. Shutemov 	}
68074e579bfSMiaohe Lin 
68174e579bfSMiaohe Lin 	if (unlikely(!writable)) {
68274e579bfSMiaohe Lin 		result = SCAN_PAGE_RO;
683d8ea7cc8SZach O'Keefe 	} else if (unlikely(cc->is_khugepaged && !referenced)) {
68474e579bfSMiaohe Lin 		result = SCAN_LACK_REFERENCED_PAGE;
68574e579bfSMiaohe Lin 	} else {
686b46e756fSKirill A. Shutemov 		result = SCAN_SUCCEED;
687b46e756fSKirill A. Shutemov 		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
688b46e756fSKirill A. Shutemov 						    referenced, writable, result);
68950ad2f24SZach O'Keefe 		return result;
690b46e756fSKirill A. Shutemov 	}
691b46e756fSKirill A. Shutemov out:
6925503fbf2SKirill A. Shutemov 	release_pte_pages(pte, _pte, compound_pagelist);
693b46e756fSKirill A. Shutemov 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
694b46e756fSKirill A. Shutemov 					    referenced, writable, result);
69550ad2f24SZach O'Keefe 	return result;
696b46e756fSKirill A. Shutemov }

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
	.last_target_node = NUMA_NO_NODE,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}
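
/*
 * Illustrative scenario (hypothetical node numbers and loads): with
 * node_reclaim_mode enabled, if cc->node_load is {[0] = 400, [2] = 112}
 * and the current page sits on a node nid with node_load[nid] == 0 whose
 * node_distance() to node 0 or node 2 exceeds node_reclaim_distance, the
 * scan is aborted (SCAN_SCAN_ABORT) rather than risking a remote
 * hugepage allocation.
 */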

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}
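
/*
 * GFP_TRANSHUGE allows direct reclaim/compaction while GFP_TRANSHUGE_LIGHT
 * does not (in gfp.h, GFP_TRANSHUGE is GFP_TRANSHUGE_LIGHT plus
 * __GFP_DIRECT_RECLAIM), so the "defrag" sysfs knob above decides how hard
 * khugepaged's hugepage allocations may try before falling back to
 * khugepaged_alloc_sleep().
 */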

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find the first node with the max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit count */
	if (target_node <= cc->last_target_node)
		for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == cc->node_load[nid]) {
				target_node = nid;
				break;
			}

	cc->last_target_node = target_node;
	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif
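
/*
 * Example of the tie-break above (hypothetical values): if node_load is
 * {[1] = 256, [3] = 256} and last_target_node == 1, the second pass picks
 * node 3, so repeated collapses with evenly spread source pages rotate
 * between the tied nodes instead of always targeting the lowest-numbered
 * one.
 */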

static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma after
 * re-taking it.
 * Returns an enum scan_result value.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * An anon VMA is expected: the address may have been unmapped and
	 * then remapped to a file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file vmas.
	 */
	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return SCAN_VMA_CHECK;
	return SCAN_SUCCEED;
}

static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmd_read_atomic(*pmd);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
	barrier();
#endif
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if any result other than SCAN_SUCCEED is returned, the
 * mmap_lock has already been released.
 */

static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and the swap entry will remain in the
		 * pagetable, resulting in a later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			/* Likely, but not guaranteed, that page lock failed */
			return SCAN_PAGE_LOCK;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return SCAN_FAIL;
		}
		swapped_in++;
	}

	/* Drain the LRU add pagevec to remove the extra pin on swapped-in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return SCAN_SUCCEED;
}

static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	/* Only allocate from the target node */
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE) | __GFP_THISNODE;
	int node = hpage_collapse_find_target_node(cc);

	if (!hpage_collapse_alloc_page(hpage, gfp, node))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;
	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
		return SCAN_CGROUP_CHARGE_FAIL;
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
	return SCAN_SUCCEED;
}

99550ad2f24SZach O'Keefe static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
99650ad2f24SZach O'Keefe 			      int referenced, int unmapped,
99750ad2f24SZach O'Keefe 			      struct collapse_control *cc)
998b46e756fSKirill A. Shutemov {
9995503fbf2SKirill A. Shutemov 	LIST_HEAD(compound_pagelist);
1000b46e756fSKirill A. Shutemov 	pmd_t *pmd, _pmd;
1001b46e756fSKirill A. Shutemov 	pte_t *pte;
1002b46e756fSKirill A. Shutemov 	pgtable_t pgtable;
100350ad2f24SZach O'Keefe 	struct page *hpage;
1004b46e756fSKirill A. Shutemov 	spinlock_t *pmd_ptl, *pte_ptl;
100550ad2f24SZach O'Keefe 	int result = SCAN_FAIL;
1006c131f751SKirill A. Shutemov 	struct vm_area_struct *vma;
1007ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
1008b46e756fSKirill A. Shutemov 
1009b46e756fSKirill A. Shutemov 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1010b46e756fSKirill A. Shutemov 
1011988ddb71SKirill A. Shutemov 	/*
1012c1e8d7c6SMichel Lespinasse 	 * Before allocating the hugepage, release the mmap_lock read lock.
1013988ddb71SKirill A. Shutemov 	 * The allocation can take potentially a long time if it involves
1014c1e8d7c6SMichel Lespinasse 	 * sync compaction, and we do not need to hold the mmap_lock during
1015988ddb71SKirill A. Shutemov 	 * that. We will recheck the vma after taking it again in write mode.
1016988ddb71SKirill A. Shutemov 	 */
1017d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1018b46e756fSKirill A. Shutemov 
101950ad2f24SZach O'Keefe 	result = alloc_charge_hpage(&hpage, mm, cc);
10209710a78aSZach O'Keefe 	if (result != SCAN_SUCCEED)
1021b46e756fSKirill A. Shutemov 		goto out_nolock;
10229710a78aSZach O'Keefe 
1023d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1024a7f4e6e4SZach O'Keefe 	result = hugepage_vma_revalidate(mm, address, &vma, cc);
102550ad2f24SZach O'Keefe 	if (result != SCAN_SUCCEED) {
1026d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1027b46e756fSKirill A. Shutemov 		goto out_nolock;
1028b46e756fSKirill A. Shutemov 	}
1029b46e756fSKirill A. Shutemov 
103050722804SZach O'Keefe 	result = find_pmd_or_thp_or_none(mm, address, &pmd);
103150722804SZach O'Keefe 	if (result != SCAN_SUCCEED) {
1032d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1033b46e756fSKirill A. Shutemov 		goto out_nolock;
1034b46e756fSKirill A. Shutemov 	}
1035b46e756fSKirill A. Shutemov 
103650ad2f24SZach O'Keefe 	if (unmapped) {
1037b46e756fSKirill A. Shutemov 		/*
103850ad2f24SZach O'Keefe 		 * __collapse_huge_page_swapin will return with mmap_lock
103950ad2f24SZach O'Keefe 		 * released when it fails. So we jump to out_nolock directly in
104050ad2f24SZach O'Keefe 		 * that case.  Continuing to collapse would cause inconsistency.
1041b46e756fSKirill A. Shutemov 		 */
104250ad2f24SZach O'Keefe 		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
104350ad2f24SZach O'Keefe 						     referenced);
104450ad2f24SZach O'Keefe 		if (result != SCAN_SUCCEED)
1045b46e756fSKirill A. Shutemov 			goto out_nolock;
1046b46e756fSKirill A. Shutemov 	}
1047b46e756fSKirill A. Shutemov 
1048d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1049b46e756fSKirill A. Shutemov 	/*
1050b46e756fSKirill A. Shutemov 	 * Prevent all access to the page tables, with the exception of
1051b46e756fSKirill A. Shutemov 	 * gup_fast (handled later by the ptep_clear_flush) and the VM
1052b46e756fSKirill A. Shutemov 	 * (handled by the anon_vma lock + PG_lock).
1053b46e756fSKirill A. Shutemov 	 */
1054d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
1055a7f4e6e4SZach O'Keefe 	result = hugepage_vma_revalidate(mm, address, &vma, cc);
105650ad2f24SZach O'Keefe 	if (result != SCAN_SUCCEED)
105718d24a7cSMiaohe Lin 		goto out_up_write;
1058b46e756fSKirill A. Shutemov 	/* check if the pmd is still valid */
105950722804SZach O'Keefe 	result = check_pmd_still_valid(mm, address, pmd);
106050722804SZach O'Keefe 	if (result != SCAN_SUCCEED)
106118d24a7cSMiaohe Lin 		goto out_up_write;
1062b46e756fSKirill A. Shutemov 
1063b46e756fSKirill A. Shutemov 	anon_vma_lock_write(vma->anon_vma);
1064b46e756fSKirill A. Shutemov 
10657269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
10666f4f13e8SJérôme Glisse 				address, address + HPAGE_PMD_SIZE);
1067ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
1068ec649c9dSVille Syrjälä 
1069ec649c9dSVille Syrjälä 	pte = pte_offset_map(pmd, address);
1070ec649c9dSVille Syrjälä 	pte_ptl = pte_lockptr(mm, pmd);
1071ec649c9dSVille Syrjälä 
1072b46e756fSKirill A. Shutemov 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1073b46e756fSKirill A. Shutemov 	/*
1074b46e756fSKirill A. Shutemov 	 * After this, gup_fast can't run anymore. This also removes
1075b46e756fSKirill A. Shutemov 	 * any huge TLB entry from the CPU, so we won't allow
1076b46e756fSKirill A. Shutemov 	 * huge and small TLB entries for the same virtual address,
1077b46e756fSKirill A. Shutemov 	 * avoiding the risk of CPU bugs in that area.
1078b46e756fSKirill A. Shutemov 	 */
1079b46e756fSKirill A. Shutemov 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1080b46e756fSKirill A. Shutemov 	spin_unlock(pmd_ptl);
1081ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
1082b46e756fSKirill A. Shutemov 
1083b46e756fSKirill A. Shutemov 	spin_lock(pte_ptl);
1084d8ea7cc8SZach O'Keefe 	result = __collapse_huge_page_isolate(vma, address, pte, cc,
10855503fbf2SKirill A. Shutemov 					       &compound_pagelist);
1086b46e756fSKirill A. Shutemov 	spin_unlock(pte_ptl);
1087b46e756fSKirill A. Shutemov 
108850ad2f24SZach O'Keefe 	if (unlikely(result != SCAN_SUCCEED)) {
1089b46e756fSKirill A. Shutemov 		pte_unmap(pte);
1090b46e756fSKirill A. Shutemov 		spin_lock(pmd_ptl);
1091b46e756fSKirill A. Shutemov 		BUG_ON(!pmd_none(*pmd));
1092b46e756fSKirill A. Shutemov 		/*
1093b46e756fSKirill A. Shutemov 		 * We can only use set_pmd_at when establishing
1094b46e756fSKirill A. Shutemov 		 * hugepmds and never for establishing regular pmds that
1095b46e756fSKirill A. Shutemov 		 * point to regular pagetables. Use pmd_populate for that.
1096b46e756fSKirill A. Shutemov 		 */
1097b46e756fSKirill A. Shutemov 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1098b46e756fSKirill A. Shutemov 		spin_unlock(pmd_ptl);
1099b46e756fSKirill A. Shutemov 		anon_vma_unlock_write(vma->anon_vma);
110018d24a7cSMiaohe Lin 		goto out_up_write;
1101b46e756fSKirill A. Shutemov 	}
1102b46e756fSKirill A. Shutemov 
1103b46e756fSKirill A. Shutemov 	/*
1104b46e756fSKirill A. Shutemov 	 * All pages are isolated and locked so anon_vma rmap
1105b46e756fSKirill A. Shutemov 	 * can't run anymore.
1106b46e756fSKirill A. Shutemov 	 */
1107b46e756fSKirill A. Shutemov 	anon_vma_unlock_write(vma->anon_vma);
1108b46e756fSKirill A. Shutemov 
110950ad2f24SZach O'Keefe 	__collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
11105503fbf2SKirill A. Shutemov 				  &compound_pagelist);
1111b46e756fSKirill A. Shutemov 	pte_unmap(pte);
1112588d01f9SMiaohe Lin 	/*
1113588d01f9SMiaohe Lin 	 * spin_lock() below is not the equivalent of smp_wmb(), but
1114588d01f9SMiaohe Lin 	 * the smp_wmb() inside __SetPageUptodate() can be reused to
1115588d01f9SMiaohe Lin 	 * prevent the copy_huge_page writes from becoming visible after
1116588d01f9SMiaohe Lin 	 * the set_pmd_at() write.
1117588d01f9SMiaohe Lin 	 */
111850ad2f24SZach O'Keefe 	__SetPageUptodate(hpage);
1119b46e756fSKirill A. Shutemov 	pgtable = pmd_pgtable(_pmd);
1120b46e756fSKirill A. Shutemov 
112150ad2f24SZach O'Keefe 	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1122f55e1014SLinus Torvalds 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1123b46e756fSKirill A. Shutemov 
1124b46e756fSKirill A. Shutemov 	spin_lock(pmd_ptl);
1125b46e756fSKirill A. Shutemov 	BUG_ON(!pmd_none(*pmd));
112650ad2f24SZach O'Keefe 	page_add_new_anon_rmap(hpage, vma, address);
112750ad2f24SZach O'Keefe 	lru_cache_add_inactive_or_unevictable(hpage, vma);
1128b46e756fSKirill A. Shutemov 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1129b46e756fSKirill A. Shutemov 	set_pmd_at(mm, address, pmd, _pmd);
1130b46e756fSKirill A. Shutemov 	update_mmu_cache_pmd(vma, address, pmd);
1131b46e756fSKirill A. Shutemov 	spin_unlock(pmd_ptl);
1132b46e756fSKirill A. Shutemov 
113350ad2f24SZach O'Keefe 	hpage = NULL;
1134b46e756fSKirill A. Shutemov 
1135b46e756fSKirill A. Shutemov 	result = SCAN_SUCCEED;
1136b46e756fSKirill A. Shutemov out_up_write:
1137d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1138b46e756fSKirill A. Shutemov out_nolock:
113950ad2f24SZach O'Keefe 	if (hpage) {
114050ad2f24SZach O'Keefe 		mem_cgroup_uncharge(page_folio(hpage));
114150ad2f24SZach O'Keefe 		put_page(hpage);
1142c6a7f445SYang Shi 	}
114350ad2f24SZach O'Keefe 	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
114450ad2f24SZach O'Keefe 	return result;
1145b46e756fSKirill A. Shutemov }
1146b46e756fSKirill A. Shutemov 
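/*
 * Scan the HPAGE_PMD_NR PTEs under @address's PMD to decide whether the
 * range is worth collapsing: count none/zero, swapped-out and shared
 * entries against the configured limits, record each page's node in
 * cc->node_load[], and on success call collapse_huge_page(), which
 * releases the mmap_lock (reported back via *mmap_locked).
 */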
1147*7d2c4385SZach O'Keefe static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1148*7d2c4385SZach O'Keefe 				   struct vm_area_struct *vma,
114950ad2f24SZach O'Keefe 				   unsigned long address, bool *mmap_locked,
115034d6b470SZach O'Keefe 				   struct collapse_control *cc)
1151b46e756fSKirill A. Shutemov {
1152b46e756fSKirill A. Shutemov 	pmd_t *pmd;
1153b46e756fSKirill A. Shutemov 	pte_t *pte, *_pte;
115450ad2f24SZach O'Keefe 	int result = SCAN_FAIL, referenced = 0;
115571a2c112SKirill A. Shutemov 	int none_or_zero = 0, shared = 0;
1156b46e756fSKirill A. Shutemov 	struct page *page = NULL;
1157b46e756fSKirill A. Shutemov 	unsigned long _address;
1158b46e756fSKirill A. Shutemov 	spinlock_t *ptl;
1159b46e756fSKirill A. Shutemov 	int node = NUMA_NO_NODE, unmapped = 0;
11600db501f7SEbru Akagunduz 	bool writable = false;
1161b46e756fSKirill A. Shutemov 
1162b46e756fSKirill A. Shutemov 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1163b46e756fSKirill A. Shutemov 
116450722804SZach O'Keefe 	result = find_pmd_or_thp_or_none(mm, address, &pmd);
116550722804SZach O'Keefe 	if (result != SCAN_SUCCEED)
1166b46e756fSKirill A. Shutemov 		goto out;
1167b46e756fSKirill A. Shutemov 
116834d6b470SZach O'Keefe 	memset(cc->node_load, 0, sizeof(cc->node_load));
1169b46e756fSKirill A. Shutemov 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1170b46e756fSKirill A. Shutemov 	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1171b46e756fSKirill A. Shutemov 	     _pte++, _address += PAGE_SIZE) {
1172b46e756fSKirill A. Shutemov 		pte_t pteval = *_pte;
1173b46e756fSKirill A. Shutemov 		if (is_swap_pte(pteval)) {
1174d8ea7cc8SZach O'Keefe 			++unmapped;
1175d8ea7cc8SZach O'Keefe 			if (!cc->is_khugepaged ||
1176d8ea7cc8SZach O'Keefe 			    unmapped <= khugepaged_max_ptes_swap) {
1177e1e267c7SPeter Xu 				/*
1178e1e267c7SPeter Xu 				 * Always be strict with uffd-wp
1179e1e267c7SPeter Xu 				 * enabled swap entries.  Please see
1180e1e267c7SPeter Xu 				 * comment below for pte_uffd_wp().
1181e1e267c7SPeter Xu 				 */
1182e1e267c7SPeter Xu 				if (pte_swp_uffd_wp(pteval)) {
1183e1e267c7SPeter Xu 					result = SCAN_PTE_UFFD_WP;
1184e1e267c7SPeter Xu 					goto out_unmap;
1185e1e267c7SPeter Xu 				}
1186b46e756fSKirill A. Shutemov 				continue;
1187b46e756fSKirill A. Shutemov 			} else {
1188b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_SWAP_PTE;
1189e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1190b46e756fSKirill A. Shutemov 				goto out_unmap;
1191b46e756fSKirill A. Shutemov 			}
1192b46e756fSKirill A. Shutemov 		}
1193b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1194d8ea7cc8SZach O'Keefe 			++none_or_zero;
1195b46e756fSKirill A. Shutemov 			if (!userfaultfd_armed(vma) &&
1196d8ea7cc8SZach O'Keefe 			    (!cc->is_khugepaged ||
1197d8ea7cc8SZach O'Keefe 			     none_or_zero <= khugepaged_max_ptes_none)) {
1198b46e756fSKirill A. Shutemov 				continue;
1199b46e756fSKirill A. Shutemov 			} else {
1200b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_NONE_PTE;
1201e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1202b46e756fSKirill A. Shutemov 				goto out_unmap;
1203b46e756fSKirill A. Shutemov 			}
1204b46e756fSKirill A. Shutemov 		}
1205e1e267c7SPeter Xu 		if (pte_uffd_wp(pteval)) {
1206e1e267c7SPeter Xu 			/*
1207e1e267c7SPeter Xu 			 * Don't collapse the page if any of the small
1208e1e267c7SPeter Xu 			 * PTEs are armed with uffd write protection.
1209e1e267c7SPeter Xu 			 * Here we can also mark the new huge pmd as
1210e1e267c7SPeter Xu 			 * write protected if any of the small ones is
12118958b249SHaitao Shi 			 * marked, but that could bring unknown
1212e1e267c7SPeter Xu 			 * userfault messages that fall outside of
1213e1e267c7SPeter Xu 			 * the registered range.  So, just keep it simple.
1214e1e267c7SPeter Xu 			 */
1215e1e267c7SPeter Xu 			result = SCAN_PTE_UFFD_WP;
1216e1e267c7SPeter Xu 			goto out_unmap;
1217e1e267c7SPeter Xu 		}
1218b46e756fSKirill A. Shutemov 		if (pte_write(pteval))
1219b46e756fSKirill A. Shutemov 			writable = true;
1220b46e756fSKirill A. Shutemov 
1221b46e756fSKirill A. Shutemov 		page = vm_normal_page(vma, _address, pteval);
12223218f871SAlex Sierra 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1223b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_NULL;
1224b46e756fSKirill A. Shutemov 			goto out_unmap;
1225b46e756fSKirill A. Shutemov 		}
1226b46e756fSKirill A. Shutemov 
1227d8ea7cc8SZach O'Keefe 		if (page_mapcount(page) > 1) {
1228d8ea7cc8SZach O'Keefe 			++shared;
1229d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
1230d8ea7cc8SZach O'Keefe 			    shared > khugepaged_max_ptes_shared) {
123171a2c112SKirill A. Shutemov 				result = SCAN_EXCEED_SHARED_PTE;
1232e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
123371a2c112SKirill A. Shutemov 				goto out_unmap;
123471a2c112SKirill A. Shutemov 			}
1235d8ea7cc8SZach O'Keefe 		}
123671a2c112SKirill A. Shutemov 
12375503fbf2SKirill A. Shutemov 		page = compound_head(page);
1238b46e756fSKirill A. Shutemov 
1239b46e756fSKirill A. Shutemov 		/*
1240b46e756fSKirill A. Shutemov 		 * Record which node the original page is from and save this
124134d6b470SZach O'Keefe 		 * information to cc->node_load[].
12420b8f0d87SQuanfa Fu 		 * Khugepaged will allocate a hugepage from the node that has
1243b46e756fSKirill A. Shutemov 		 * the maximum hit record.
1244b46e756fSKirill A. Shutemov 		 */
1245b46e756fSKirill A. Shutemov 		node = page_to_nid(page);
1246*7d2c4385SZach O'Keefe 		if (hpage_collapse_scan_abort(node, cc)) {
1247b46e756fSKirill A. Shutemov 			result = SCAN_SCAN_ABORT;
1248b46e756fSKirill A. Shutemov 			goto out_unmap;
1249b46e756fSKirill A. Shutemov 		}
125034d6b470SZach O'Keefe 		cc->node_load[node]++;
1251b46e756fSKirill A. Shutemov 		if (!PageLRU(page)) {
1252b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LRU;
1253b46e756fSKirill A. Shutemov 			goto out_unmap;
1254b46e756fSKirill A. Shutemov 		}
1255b46e756fSKirill A. Shutemov 		if (PageLocked(page)) {
1256b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LOCK;
1257b46e756fSKirill A. Shutemov 			goto out_unmap;
1258b46e756fSKirill A. Shutemov 		}
1259b46e756fSKirill A. Shutemov 		if (!PageAnon(page)) {
1260b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_ANON;
1261b46e756fSKirill A. Shutemov 			goto out_unmap;
1262b46e756fSKirill A. Shutemov 		}
1263b46e756fSKirill A. Shutemov 
1264b46e756fSKirill A. Shutemov 		/*
12659445689fSKirill A. Shutemov 		 * Check if the page has any GUP (or other external) pins.
12669445689fSKirill A. Shutemov 		 *
126736ee2c78SMiaohe Lin 		 * Here the check is racy: it may see total_mapcount > refcount
12689445689fSKirill A. Shutemov 		 * in some cases.
12699445689fSKirill A. Shutemov 		 * For example, take one process with one forked child process.
12709445689fSKirill A. Shutemov 		 * The parent has the PMD split due to MADV_DONTNEED, then
12719445689fSKirill A. Shutemov 		 * the child is trying to unmap the whole PMD, but khugepaged
12729445689fSKirill A. Shutemov 		 * may be scanning the parent between the child clearing the
12739445689fSKirill A. Shutemov 		 * PageDoubleMap flag and decrementing the mapcount.  So
12749445689fSKirill A. Shutemov 		 * khugepaged may see total_mapcount > refcount.
12759445689fSKirill A. Shutemov 		 *
12769445689fSKirill A. Shutemov 		 * But such a case is ephemeral; we can always retry the collapse
12779445689fSKirill A. Shutemov 		 * later.  However, it may report a false positive if the page
12789445689fSKirill A. Shutemov 		 * has excessive GUP pins (i.e. 512).  Anyway, the same check
12799445689fSKirill A. Shutemov 		 * will be done again later, so the risk seems low.
1280b46e756fSKirill A. Shutemov 		 */
12819445689fSKirill A. Shutemov 		if (!is_refcount_suitable(page)) {
1282b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
1283b46e756fSKirill A. Shutemov 			goto out_unmap;
1284b46e756fSKirill A. Shutemov 		}
1285d8ea7cc8SZach O'Keefe 
1286d8ea7cc8SZach O'Keefe 		/*
1287d8ea7cc8SZach O'Keefe 		 * If collapse was initiated by khugepaged, check that there are
1288d8ea7cc8SZach O'Keefe 		 * enough young PTEs to justify collapsing the page.
1289d8ea7cc8SZach O'Keefe 		 */
1290d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
1291d8ea7cc8SZach O'Keefe 		    (pte_young(pteval) || page_is_young(page) ||
1292d8ea7cc8SZach O'Keefe 		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1293d8ea7cc8SZach O'Keefe 								     address)))
12940db501f7SEbru Akagunduz 			referenced++;
1295b46e756fSKirill A. Shutemov 	}
1296ffe945e6SKirill A. Shutemov 	if (!writable) {
1297ffe945e6SKirill A. Shutemov 		result = SCAN_PAGE_RO;
1298d8ea7cc8SZach O'Keefe 	} else if (cc->is_khugepaged &&
1299d8ea7cc8SZach O'Keefe 		   (!referenced ||
1300d8ea7cc8SZach O'Keefe 		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1301ffe945e6SKirill A. Shutemov 		result = SCAN_LACK_REFERENCED_PAGE;
1302ffe945e6SKirill A. Shutemov 	} else {
1303b46e756fSKirill A. Shutemov 		result = SCAN_SUCCEED;
1304b46e756fSKirill A. Shutemov 	}
1305b46e756fSKirill A. Shutemov out_unmap:
1306b46e756fSKirill A. Shutemov 	pte_unmap_unlock(pte, ptl);
130750ad2f24SZach O'Keefe 	if (result == SCAN_SUCCEED) {
130850ad2f24SZach O'Keefe 		result = collapse_huge_page(mm, address, referenced,
130950ad2f24SZach O'Keefe 					    unmapped, cc);
1310c1e8d7c6SMichel Lespinasse 		/* collapse_huge_page will return with the mmap_lock released */
131150ad2f24SZach O'Keefe 		*mmap_locked = false;
1312b46e756fSKirill A. Shutemov 	}
1313b46e756fSKirill A. Shutemov out:
1314b46e756fSKirill A. Shutemov 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1315b46e756fSKirill A. Shutemov 				     none_or_zero, result, unmapped);
131650ad2f24SZach O'Keefe 	return result;
1317b46e756fSKirill A. Shutemov }
1318b46e756fSKirill A. Shutemov 
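/*
 * Release a slot whose mm has exited: unhash it, unlink it from the scan
 * list and drop the mm reference taken when the mm was registered.
 */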
1319b46e756fSKirill A. Shutemov static void collect_mm_slot(struct mm_slot *mm_slot)
1320b46e756fSKirill A. Shutemov {
1321b46e756fSKirill A. Shutemov 	struct mm_struct *mm = mm_slot->mm;
1322b46e756fSKirill A. Shutemov 
132335f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
1324b46e756fSKirill A. Shutemov 
1325*7d2c4385SZach O'Keefe 	if (hpage_collapse_test_exit(mm)) {
1326b46e756fSKirill A. Shutemov 		/* free mm_slot */
1327b46e756fSKirill A. Shutemov 		hash_del(&mm_slot->hash);
1328b46e756fSKirill A. Shutemov 		list_del(&mm_slot->mm_node);
1329b46e756fSKirill A. Shutemov 
1330b46e756fSKirill A. Shutemov 		/*
1331b46e756fSKirill A. Shutemov 		 * Not strictly needed because the mm exited already.
1332b46e756fSKirill A. Shutemov 		 *
1333b46e756fSKirill A. Shutemov 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1334b46e756fSKirill A. Shutemov 		 */
1335b46e756fSKirill A. Shutemov 
1336b46e756fSKirill A. Shutemov 		/* khugepaged_mm_lock actually not necessary for the below */
1337b46e756fSKirill A. Shutemov 		free_mm_slot(mm_slot);
1338b46e756fSKirill A. Shutemov 		mmdrop(mm);
1339b46e756fSKirill A. Shutemov 	}
1340b46e756fSKirill A. Shutemov }
1341b46e756fSKirill A. Shutemov 
1342396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM
134327e1f827SSong Liu /*
134427e1f827SSong Liu  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
134527e1f827SSong Liu  * khugepaged should try to collapse the page table.
134627e1f827SSong Liu  */
1347081c3256SMiaohe Lin static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
134827e1f827SSong Liu 					  unsigned long addr)
134927e1f827SSong Liu {
135027e1f827SSong Liu 	struct mm_slot *mm_slot;
135127e1f827SSong Liu 
135227e1f827SSong Liu 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
135327e1f827SSong Liu 
135427e1f827SSong Liu 	spin_lock(&khugepaged_mm_lock);
135527e1f827SSong Liu 	mm_slot = get_mm_slot(mm);
135627e1f827SSong Liu 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
135727e1f827SSong Liu 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
135827e1f827SSong Liu 	spin_unlock(&khugepaged_mm_lock);
135927e1f827SSong Liu }
136027e1f827SSong Liu 
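/*
 * Clear the PMD under its lock, flush the TLB, and free the no-longer-used
 * page table, adjusting the mm counters.  Caller holds the mmap write lock.
 */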
1361e59a47b8SPasha Tatashin static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1362e59a47b8SPasha Tatashin 				  unsigned long addr, pmd_t *pmdp)
1363e59a47b8SPasha Tatashin {
1364e59a47b8SPasha Tatashin 	spinlock_t *ptl;
1365e59a47b8SPasha Tatashin 	pmd_t pmd;
1366e59a47b8SPasha Tatashin 
136780110bbfSPasha Tatashin 	mmap_assert_write_locked(mm);
1368e59a47b8SPasha Tatashin 	ptl = pmd_lock(vma->vm_mm, pmdp);
1369e59a47b8SPasha Tatashin 	pmd = pmdp_collapse_flush(vma, addr, pmdp);
1370e59a47b8SPasha Tatashin 	spin_unlock(ptl);
1371e59a47b8SPasha Tatashin 	mm_dec_nr_ptes(mm);
137280110bbfSPasha Tatashin 	page_table_check_pte_clear_range(mm, addr, pmd);
1373e59a47b8SPasha Tatashin 	pte_free(mm, pmd_pgtable(pmd));
1374e59a47b8SPasha Tatashin }
1375e59a47b8SPasha Tatashin 
137627e1f827SSong Liu /**
1377336e6b53SAlex Shi  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1378336e6b53SAlex Shi  * address haddr.
1379336e6b53SAlex Shi  *
1380336e6b53SAlex Shi  * @mm: process address space where collapse happens
1381336e6b53SAlex Shi  * @addr: THP collapse address
138227e1f827SSong Liu  *
138327e1f827SSong Liu  * This function checks whether all the PTEs in the PMD are pointing to the
138427e1f827SSong Liu  * right THP. If so, retract the page table so the THP can refault in
138527e1f827SSong Liu  * as pmd-mapped.
138627e1f827SSong Liu  */
138727e1f827SSong Liu void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
138827e1f827SSong Liu {
138927e1f827SSong Liu 	unsigned long haddr = addr & HPAGE_PMD_MASK;
139027e1f827SSong Liu 	struct vm_area_struct *vma = find_vma(mm, haddr);
1391119a5fc1SHugh Dickins 	struct page *hpage;
139227e1f827SSong Liu 	pte_t *start_pte, *pte;
1393e59a47b8SPasha Tatashin 	pmd_t *pmd;
139427e1f827SSong Liu 	spinlock_t *ptl;
139527e1f827SSong Liu 	int count = 0;
139627e1f827SSong Liu 	int i;
139727e1f827SSong Liu 
139827e1f827SSong Liu 	if (!vma || !vma->vm_file ||
1399fef792a4SMiaohe Lin 	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
140027e1f827SSong Liu 		return;
140127e1f827SSong Liu 
140227e1f827SSong Liu 	/*
1403a7f4e6e4SZach O'Keefe 	 * If we are here, we've succeeded in replacing all the native pages
1404a7f4e6e4SZach O'Keefe 	 * in the page cache with a single hugepage. If a mm were to fault-in
1405a7f4e6e4SZach O'Keefe 	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1406a7f4e6e4SZach O'Keefe 	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1407a7f4e6e4SZach O'Keefe 	 * analogously elide sysfs THP settings here.
140827e1f827SSong Liu 	 */
1409a7f4e6e4SZach O'Keefe 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
141027e1f827SSong Liu 		return;
141127e1f827SSong Liu 
1412deb4c93aSPeter Xu 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1413deb4c93aSPeter Xu 	if (userfaultfd_wp(vma))
1414deb4c93aSPeter Xu 		return;
1415deb4c93aSPeter Xu 
1416119a5fc1SHugh Dickins 	hpage = find_lock_page(vma->vm_file->f_mapping,
1417119a5fc1SHugh Dickins 			       linear_page_index(vma, haddr));
1418119a5fc1SHugh Dickins 	if (!hpage)
1419119a5fc1SHugh Dickins 		return;
1420119a5fc1SHugh Dickins 
1421119a5fc1SHugh Dickins 	if (!PageHead(hpage))
1422119a5fc1SHugh Dickins 		goto drop_hpage;
1423119a5fc1SHugh Dickins 
142450722804SZach O'Keefe 	if (find_pmd_or_thp_or_none(mm, haddr, &pmd) != SCAN_SUCCEED)
1425119a5fc1SHugh Dickins 		goto drop_hpage;
142627e1f827SSong Liu 
142727e1f827SSong Liu 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
142827e1f827SSong Liu 
142927e1f827SSong Liu 	/* step 1: check that all mapped PTEs point to the right huge page */
143027e1f827SSong Liu 	for (i = 0, addr = haddr, pte = start_pte;
143127e1f827SSong Liu 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
143227e1f827SSong Liu 		struct page *page;
143327e1f827SSong Liu 
143427e1f827SSong Liu 		/* empty pte, skip */
143527e1f827SSong Liu 		if (pte_none(*pte))
143627e1f827SSong Liu 			continue;
143727e1f827SSong Liu 
143827e1f827SSong Liu 		/* page swapped out, abort */
143927e1f827SSong Liu 		if (!pte_present(*pte))
144027e1f827SSong Liu 			goto abort;
144127e1f827SSong Liu 
144227e1f827SSong Liu 		page = vm_normal_page(vma, addr, *pte);
14433218f871SAlex Sierra 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
14443218f871SAlex Sierra 			page = NULL;
144527e1f827SSong Liu 		/*
1446119a5fc1SHugh Dickins 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1447119a5fc1SHugh Dickins 		 * page table, but the new page will not be a subpage of hpage.
144827e1f827SSong Liu 		 */
1449119a5fc1SHugh Dickins 		if (hpage + i != page)
145027e1f827SSong Liu 			goto abort;
145127e1f827SSong Liu 		count++;
145227e1f827SSong Liu 	}
145327e1f827SSong Liu 
145427e1f827SSong Liu 	/* step 2: adjust rmap */
145527e1f827SSong Liu 	for (i = 0, addr = haddr, pte = start_pte;
145627e1f827SSong Liu 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
145727e1f827SSong Liu 		struct page *page;
145827e1f827SSong Liu 
145927e1f827SSong Liu 		if (pte_none(*pte))
146027e1f827SSong Liu 			continue;
146127e1f827SSong Liu 		page = vm_normal_page(vma, addr, *pte);
14623218f871SAlex Sierra 		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
14633218f871SAlex Sierra 			goto abort;
1464cea86fe2SHugh Dickins 		page_remove_rmap(page, vma, false);
146527e1f827SSong Liu 	}
146627e1f827SSong Liu 
146727e1f827SSong Liu 	pte_unmap_unlock(start_pte, ptl);
146827e1f827SSong Liu 
146927e1f827SSong Liu 	/* step 3: set proper refcount and mm_counters. */
1470119a5fc1SHugh Dickins 	if (count) {
147127e1f827SSong Liu 		page_ref_sub(hpage, count);
147227e1f827SSong Liu 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
147327e1f827SSong Liu 	}
147427e1f827SSong Liu 
147527e1f827SSong Liu 	/* step 4: collapse pmd */
1476e59a47b8SPasha Tatashin 	collapse_and_free_pmd(mm, vma, haddr, pmd);
1477119a5fc1SHugh Dickins drop_hpage:
1478119a5fc1SHugh Dickins 	unlock_page(hpage);
1479119a5fc1SHugh Dickins 	put_page(hpage);
148027e1f827SSong Liu 	return;
148127e1f827SSong Liu 
148227e1f827SSong Liu abort:
148327e1f827SSong Liu 	pte_unmap_unlock(start_pte, ptl);
1484119a5fc1SHugh Dickins 	goto drop_hpage;
148527e1f827SSong Liu }
148627e1f827SSong Liu 
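/*
 * Retry the deferred collapses recorded on @mm_slot by earlier failed
 * attempts to take the mmap write lock in retract_page_tables().
 */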
14870edf61e5SMiaohe Lin static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
148827e1f827SSong Liu {
148927e1f827SSong Liu 	struct mm_struct *mm = mm_slot->mm;
149027e1f827SSong Liu 	int i;
149127e1f827SSong Liu 
149227e1f827SSong Liu 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
14930edf61e5SMiaohe Lin 		return;
149427e1f827SSong Liu 
1495d8ed45c5SMichel Lespinasse 	if (!mmap_write_trylock(mm))
14960edf61e5SMiaohe Lin 		return;
149727e1f827SSong Liu 
1498*7d2c4385SZach O'Keefe 	if (unlikely(hpage_collapse_test_exit(mm)))
149927e1f827SSong Liu 		goto out;
150027e1f827SSong Liu 
150127e1f827SSong Liu 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
150227e1f827SSong Liu 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
150327e1f827SSong Liu 
150427e1f827SSong Liu out:
150527e1f827SSong Liu 	mm_slot->nr_pte_mapped_thp = 0;
1506d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
150727e1f827SSong Liu }
150827e1f827SSong Liu 
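/*
 * Walk all VMAs mapping @pgoff of @mapping through the i_mmap interval
 * tree and retract any page table still mapping the range with PTEs, so
 * that a later fault maps the new huge page by a PMD.  VMAs whose
 * mmap_lock cannot be taken right away are queued for a deferred retry.
 */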
1509f3f0e1d2SKirill A. Shutemov static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1510f3f0e1d2SKirill A. Shutemov {
1511f3f0e1d2SKirill A. Shutemov 	struct vm_area_struct *vma;
151218e77600SHugh Dickins 	struct mm_struct *mm;
1513f3f0e1d2SKirill A. Shutemov 	unsigned long addr;
1514e59a47b8SPasha Tatashin 	pmd_t *pmd;
1515f3f0e1d2SKirill A. Shutemov 
1516f3f0e1d2SKirill A. Shutemov 	i_mmap_lock_write(mapping);
1517f3f0e1d2SKirill A. Shutemov 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
151827e1f827SSong Liu 		/*
151927e1f827SSong Liu 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
152027e1f827SSong Liu 		 * got written to. These VMAs are likely not worth the cost of
15213e4e28c5SMichel Lespinasse 		 * taking mmap_write_lock(mm), as the PMD-mapping is likely to be
152227e1f827SSong Liu 		 * split later.
152327e1f827SSong Liu 		 *
152436ee2c78SMiaohe Lin 		 * Note that the vma->anon_vma check is racy: it can be set up by
1525c1e8d7c6SMichel Lespinasse 		 * the fault path after the check but before we take the mmap_lock.
152627e1f827SSong Liu 		 * But the page lock would prevent establishing any new ptes of the
152727e1f827SSong Liu 		 * page, so we are safe.
152827e1f827SSong Liu 		 *
152927e1f827SSong Liu 		 * An alternative would be to drop the check, but to check that the
153027e1f827SSong Liu 		 * page table is clear before calling pmdp_collapse_flush() under
153127e1f827SSong Liu 		 * the ptl. That has a higher chance of recovering a THP for the
153227e1f827SSong Liu 		 * VMA, but has a higher cost too.
153327e1f827SSong Liu 		 */
1534f3f0e1d2SKirill A. Shutemov 		if (vma->anon_vma)
1535f3f0e1d2SKirill A. Shutemov 			continue;
1536f3f0e1d2SKirill A. Shutemov 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1537f3f0e1d2SKirill A. Shutemov 		if (addr & ~HPAGE_PMD_MASK)
1538f3f0e1d2SKirill A. Shutemov 			continue;
1539f3f0e1d2SKirill A. Shutemov 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1540f3f0e1d2SKirill A. Shutemov 			continue;
154118e77600SHugh Dickins 		mm = vma->vm_mm;
154250722804SZach O'Keefe 		if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1543f3f0e1d2SKirill A. Shutemov 			continue;
1544f3f0e1d2SKirill A. Shutemov 		/*
1545c1e8d7c6SMichel Lespinasse 		 * We need exclusive mmap_lock to retract page table.
154627e1f827SSong Liu 		 *
154727e1f827SSong Liu 		 * We use trylock due to lock inversion: we need to acquire
1548c1e8d7c6SMichel Lespinasse 		 * mmap_lock while holding page lock. Fault path does it in
154927e1f827SSong Liu 		 * reverse order. Trylock is a way to avoid deadlock.
1550f3f0e1d2SKirill A. Shutemov 		 */
155118e77600SHugh Dickins 		if (mmap_write_trylock(mm)) {
1552deb4c93aSPeter Xu 			/*
1553deb4c93aSPeter Xu 			 * When a vma is registered with uffd-wp, we can't
1554deb4c93aSPeter Xu 			 * recycle the pmd pgtable because there can be pte
1555deb4c93aSPeter Xu 			 * markers installed.  Only skip it, so the rest of the mm/vma
1556deb4c93aSPeter Xu 			 * can still have the same file mapped hugely; however, it
1557deb4c93aSPeter Xu 			 * will always be mapped in small page sizes for uffd-wp
1558deb4c93aSPeter Xu 			 * registered ranges.
1559deb4c93aSPeter Xu 			 */
1560*7d2c4385SZach O'Keefe 			if (!hpage_collapse_test_exit(mm) &&
1561*7d2c4385SZach O'Keefe 			    !userfaultfd_wp(vma))
1562e59a47b8SPasha Tatashin 				collapse_and_free_pmd(mm, vma, addr, pmd);
156318e77600SHugh Dickins 			mmap_write_unlock(mm);
156427e1f827SSong Liu 		} else {
156527e1f827SSong Liu 			/* Try again later */
156618e77600SHugh Dickins 			khugepaged_add_pte_mapped_thp(mm, addr);
1567f3f0e1d2SKirill A. Shutemov 		}
1568f3f0e1d2SKirill A. Shutemov 	}
1569f3f0e1d2SKirill A. Shutemov 	i_mmap_unlock_write(mapping);
1570f3f0e1d2SKirill A. Shutemov }
1571f3f0e1d2SKirill A. Shutemov 
1572f3f0e1d2SKirill A. Shutemov /**
157399cb0dbdSSong Liu  * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1574f3f0e1d2SKirill A. Shutemov  *
1575336e6b53SAlex Shi  * @mm: process address space where collapse happens
1576336e6b53SAlex Shi  * @file: file that collapse on
1577336e6b53SAlex Shi  * @start: collapse start address
15789710a78aSZach O'Keefe  * @cc: collapse context and scratchpad
1579336e6b53SAlex Shi  *
1580f3f0e1d2SKirill A. Shutemov  * Basic scheme is simple, details are more complex:
158187c460a0SHugh Dickins  *  - allocate and lock a new huge page;
158277da9389SMatthew Wilcox  *  - scan page cache replacing old pages with the new one
158399cb0dbdSSong Liu  *    + swap/gup in pages if necessary;
1584f3f0e1d2SKirill A. Shutemov  *    + fill in gaps;
158577da9389SMatthew Wilcox  *    + keep old pages around in case rollback is required;
158677da9389SMatthew Wilcox  *  - if replacing succeeds:
1587f3f0e1d2SKirill A. Shutemov  *    + copy data over;
1588f3f0e1d2SKirill A. Shutemov  *    + free old pages;
158987c460a0SHugh Dickins  *    + unlock huge page;
1590f3f0e1d2SKirill A. Shutemov  *  - if replacing fails:
1591f3f0e1d2SKirill A. Shutemov  *    + put all pages back and unfreeze them;
159277da9389SMatthew Wilcox  *    + restore gaps in the page cache;
159387c460a0SHugh Dickins  *    + unlock and free huge page;
1594f3f0e1d2SKirill A. Shutemov  */
159550ad2f24SZach O'Keefe static int collapse_file(struct mm_struct *mm, struct file *file,
159650ad2f24SZach O'Keefe 			 pgoff_t start, struct collapse_control *cc)
1597f3f0e1d2SKirill A. Shutemov {
1598579c571eSSong Liu 	struct address_space *mapping = file->f_mapping;
159950ad2f24SZach O'Keefe 	struct page *hpage;
1600f3f0e1d2SKirill A. Shutemov 	pgoff_t index, end = start + HPAGE_PMD_NR;
1601f3f0e1d2SKirill A. Shutemov 	LIST_HEAD(pagelist);
160277da9389SMatthew Wilcox 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1603f3f0e1d2SKirill A. Shutemov 	int nr_none = 0, result = SCAN_SUCCEED;
160499cb0dbdSSong Liu 	bool is_shmem = shmem_file(file);
1605bf9eceadSMuchun Song 	int nr;
1606f3f0e1d2SKirill A. Shutemov 
160799cb0dbdSSong Liu 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1608f3f0e1d2SKirill A. Shutemov 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1609f3f0e1d2SKirill A. Shutemov 
161050ad2f24SZach O'Keefe 	result = alloc_charge_hpage(&hpage, mm, cc);
16119710a78aSZach O'Keefe 	if (result != SCAN_SUCCEED)
1612f3f0e1d2SKirill A. Shutemov 		goto out;
1613f3f0e1d2SKirill A. Shutemov 
16146b24ca4aSMatthew Wilcox (Oracle) 	/*
16156b24ca4aSMatthew Wilcox (Oracle) 	 * Ensure we have slots for all the pages in the range.  This is
16166b24ca4aSMatthew Wilcox (Oracle) 	 * almost certainly a no-op because most of the pages must be present.
16176b24ca4aSMatthew Wilcox (Oracle) 	 */
161895feeabbSHugh Dickins 	do {
161995feeabbSHugh Dickins 		xas_lock_irq(&xas);
162095feeabbSHugh Dickins 		xas_create_range(&xas);
162195feeabbSHugh Dickins 		if (!xas_error(&xas))
162295feeabbSHugh Dickins 			break;
162395feeabbSHugh Dickins 		xas_unlock_irq(&xas);
162495feeabbSHugh Dickins 		if (!xas_nomem(&xas, GFP_KERNEL)) {
162595feeabbSHugh Dickins 			result = SCAN_FAIL;
162695feeabbSHugh Dickins 			goto out;
162795feeabbSHugh Dickins 		}
162895feeabbSHugh Dickins 	} while (1);
162995feeabbSHugh Dickins 
163050ad2f24SZach O'Keefe 	__SetPageLocked(hpage);
163199cb0dbdSSong Liu 	if (is_shmem)
163250ad2f24SZach O'Keefe 		__SetPageSwapBacked(hpage);
163350ad2f24SZach O'Keefe 	hpage->index = start;
163450ad2f24SZach O'Keefe 	hpage->mapping = mapping;
1635f3f0e1d2SKirill A. Shutemov 
1636f3f0e1d2SKirill A. Shutemov 	/*
163750ad2f24SZach O'Keefe 	 * At this point the hpage is locked and not up-to-date.
163887c460a0SHugh Dickins 	 * It's safe to insert it into the page cache, because nobody would
163987c460a0SHugh Dickins 	 * be able to map it or use it in another way until we unlock it.
1640f3f0e1d2SKirill A. Shutemov 	 */
1641f3f0e1d2SKirill A. Shutemov 
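	/*
	 * Walk the target range: fill holes (shmem only) with the new huge
	 * page, and lock, isolate and unmap each present page, keeping the
	 * old pages on @pagelist so the operation can be rolled back.
	 */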
164277da9389SMatthew Wilcox 	xas_set(&xas, start);
164377da9389SMatthew Wilcox 	for (index = start; index < end; index++) {
164477da9389SMatthew Wilcox 		struct page *page = xas_next(&xas);
164577da9389SMatthew Wilcox 
164677da9389SMatthew Wilcox 		VM_BUG_ON(index != xas.xa_index);
164799cb0dbdSSong Liu 		if (is_shmem) {
164877da9389SMatthew Wilcox 			if (!page) {
1649701270faSHugh Dickins 				/*
165099cb0dbdSSong Liu 				 * Stop if extent has been truncated or
165199cb0dbdSSong Liu 				 * hole-punched, and is now completely
165299cb0dbdSSong Liu 				 * empty.
1653701270faSHugh Dickins 				 */
1654701270faSHugh Dickins 				if (index == start) {
1655701270faSHugh Dickins 					if (!xas_next_entry(&xas, end - 1)) {
1656701270faSHugh Dickins 						result = SCAN_TRUNCATED;
1657042a3082SHugh Dickins 						goto xa_locked;
1658701270faSHugh Dickins 					}
1659701270faSHugh Dickins 					xas_set(&xas, index);
1660701270faSHugh Dickins 				}
166177da9389SMatthew Wilcox 				if (!shmem_charge(mapping->host, 1)) {
1662f3f0e1d2SKirill A. Shutemov 					result = SCAN_FAIL;
1663042a3082SHugh Dickins 					goto xa_locked;
1664f3f0e1d2SKirill A. Shutemov 				}
166550ad2f24SZach O'Keefe 				xas_store(&xas, hpage);
166677da9389SMatthew Wilcox 				nr_none++;
166777da9389SMatthew Wilcox 				continue;
1668f3f0e1d2SKirill A. Shutemov 			}
1669f3f0e1d2SKirill A. Shutemov 
16703159f943SMatthew Wilcox 			if (xa_is_value(page) || !PageUptodate(page)) {
167177da9389SMatthew Wilcox 				xas_unlock_irq(&xas);
1672f3f0e1d2SKirill A. Shutemov 				/* swap in or instantiate fallocated page */
1673f3f0e1d2SKirill A. Shutemov 				if (shmem_getpage(mapping->host, index, &page,
1674acdd9f8eSHugh Dickins 						  SGP_NOALLOC)) {
1675f3f0e1d2SKirill A. Shutemov 					result = SCAN_FAIL;
167677da9389SMatthew Wilcox 					goto xa_unlocked;
1677f3f0e1d2SKirill A. Shutemov 				}
1678f3f0e1d2SKirill A. Shutemov 			} else if (trylock_page(page)) {
1679f3f0e1d2SKirill A. Shutemov 				get_page(page);
1680042a3082SHugh Dickins 				xas_unlock_irq(&xas);
1681f3f0e1d2SKirill A. Shutemov 			} else {
1682f3f0e1d2SKirill A. Shutemov 				result = SCAN_PAGE_LOCK;
1683042a3082SHugh Dickins 				goto xa_locked;
1684f3f0e1d2SKirill A. Shutemov 			}
168599cb0dbdSSong Liu 		} else {	/* !is_shmem */
168699cb0dbdSSong Liu 			if (!page || xa_is_value(page)) {
168799cb0dbdSSong Liu 				xas_unlock_irq(&xas);
168899cb0dbdSSong Liu 				page_cache_sync_readahead(mapping, &file->f_ra,
168999cb0dbdSSong Liu 							  file, index,
1690e5a59d30SDavid Howells 							  end - index);
169199cb0dbdSSong Liu 				/* drain pagevecs to help isolate_lru_page() */
169299cb0dbdSSong Liu 				lru_add_drain();
169399cb0dbdSSong Liu 				page = find_lock_page(mapping, index);
169499cb0dbdSSong Liu 				if (unlikely(page == NULL)) {
169599cb0dbdSSong Liu 					result = SCAN_FAIL;
169699cb0dbdSSong Liu 					goto xa_unlocked;
169799cb0dbdSSong Liu 				}
169875f36069SSong Liu 			} else if (PageDirty(page)) {
169975f36069SSong Liu 				 * khugepaged only works on a read-only fd,
170075f36069SSong Liu 				 * so this page is dirty because it hasn't
170175f36069SSong Liu 				 * been flushed since the first write. There
170275f36069SSong Liu 				 * been flushed since first write. There
170375f36069SSong Liu 				 * won't be new dirty pages.
170475f36069SSong Liu 				 *
170575f36069SSong Liu 				 * Trigger async flush here and hope the
170675f36069SSong Liu 				 * writeback is done when khugepaged
170775f36069SSong Liu 				 * revisits this page.
170875f36069SSong Liu 				 *
170975f36069SSong Liu 				 * This is a one-off situation. We are not
171075f36069SSong Liu 				 * forcing writeback in a loop.
171175f36069SSong Liu 				 */
171275f36069SSong Liu 				xas_unlock_irq(&xas);
171375f36069SSong Liu 				filemap_flush(mapping);
171475f36069SSong Liu 				result = SCAN_FAIL;
171575f36069SSong Liu 				goto xa_unlocked;
171674c42e1bSRongwei Wang 			} else if (PageWriteback(page)) {
171774c42e1bSRongwei Wang 				xas_unlock_irq(&xas);
171874c42e1bSRongwei Wang 				result = SCAN_FAIL;
171974c42e1bSRongwei Wang 				goto xa_unlocked;
172099cb0dbdSSong Liu 			} else if (trylock_page(page)) {
172199cb0dbdSSong Liu 				get_page(page);
172299cb0dbdSSong Liu 				xas_unlock_irq(&xas);
172399cb0dbdSSong Liu 			} else {
172499cb0dbdSSong Liu 				result = SCAN_PAGE_LOCK;
172599cb0dbdSSong Liu 				goto xa_locked;
172699cb0dbdSSong Liu 			}
172799cb0dbdSSong Liu 		}
1728f3f0e1d2SKirill A. Shutemov 
1729f3f0e1d2SKirill A. Shutemov 		/*
1730b93b0163SMatthew Wilcox 		 * The page must be locked, so we can drop the i_pages lock
1731f3f0e1d2SKirill A. Shutemov 		 * without racing with truncate.
1732f3f0e1d2SKirill A. Shutemov 		 */
1733f3f0e1d2SKirill A. Shutemov 		VM_BUG_ON_PAGE(!PageLocked(page), page);
17344655e5e5SSong Liu 
17354655e5e5SSong Liu 		/* make sure the page is up to date */
17364655e5e5SSong Liu 		if (unlikely(!PageUptodate(page))) {
17374655e5e5SSong Liu 			result = SCAN_FAIL;
17384655e5e5SSong Liu 			goto out_unlock;
17394655e5e5SSong Liu 		}
174006a5e126SHugh Dickins 
174106a5e126SHugh Dickins 		/*
174206a5e126SHugh Dickins 		 * If file was truncated then extended, or hole-punched, before
174306a5e126SHugh Dickins 		 * we locked the first page, then a THP might be there already.
174406a5e126SHugh Dickins 		 */
174506a5e126SHugh Dickins 		if (PageTransCompound(page)) {
174606a5e126SHugh Dickins 			result = SCAN_PAGE_COMPOUND;
174706a5e126SHugh Dickins 			goto out_unlock;
174806a5e126SHugh Dickins 		}
1749f3f0e1d2SKirill A. Shutemov 
1750f3f0e1d2SKirill A. Shutemov 		if (page_mapping(page) != mapping) {
1751f3f0e1d2SKirill A. Shutemov 			result = SCAN_TRUNCATED;
1752f3f0e1d2SKirill A. Shutemov 			goto out_unlock;
1753f3f0e1d2SKirill A. Shutemov 		}
1754f3f0e1d2SKirill A. Shutemov 
175574c42e1bSRongwei Wang 		if (!is_shmem && (PageDirty(page) ||
175674c42e1bSRongwei Wang 				  PageWriteback(page))) {
17574655e5e5SSong Liu 			/*
17584655e5e5SSong Liu 			 * khugepaged only works on a read-only fd, so this
17594655e5e5SSong Liu 			 * page is dirty because it hasn't been flushed
17604655e5e5SSong Liu 			 * since the first write.
17614655e5e5SSong Liu 			 */
17624655e5e5SSong Liu 			result = SCAN_FAIL;
17634655e5e5SSong Liu 			goto out_unlock;
17644655e5e5SSong Liu 		}
17654655e5e5SSong Liu 
1766f3f0e1d2SKirill A. Shutemov 		if (isolate_lru_page(page)) {
1767f3f0e1d2SKirill A. Shutemov 			result = SCAN_DEL_PAGE_LRU;
1768042a3082SHugh Dickins 			goto out_unlock;
1769f3f0e1d2SKirill A. Shutemov 		}
1770f3f0e1d2SKirill A. Shutemov 
177199cb0dbdSSong Liu 		if (page_has_private(page) &&
177299cb0dbdSSong Liu 		    !try_to_release_page(page, GFP_KERNEL)) {
177399cb0dbdSSong Liu 			result = SCAN_PAGE_HAS_PRIVATE;
17742f33a706SHugh Dickins 			putback_lru_page(page);
177599cb0dbdSSong Liu 			goto out_unlock;
177699cb0dbdSSong Liu 		}
177799cb0dbdSSong Liu 
1778f3f0e1d2SKirill A. Shutemov 		if (page_mapped(page))
1779869f7ee6SMatthew Wilcox (Oracle) 			try_to_unmap(page_folio(page),
1780869f7ee6SMatthew Wilcox (Oracle) 					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1781f3f0e1d2SKirill A. Shutemov 
178277da9389SMatthew Wilcox 		xas_lock_irq(&xas);
178377da9389SMatthew Wilcox 		xas_set(&xas, index);
1784f3f0e1d2SKirill A. Shutemov 
178577da9389SMatthew Wilcox 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1786f3f0e1d2SKirill A. Shutemov 
1787f3f0e1d2SKirill A. Shutemov 		/*
1788f3f0e1d2SKirill A. Shutemov 		 * The page is expected to have page_count() == 3:
1789f3f0e1d2SKirill A. Shutemov 		 *  - we hold a pin on it;
179077da9389SMatthew Wilcox 		 *  - one reference from page cache;
1791f3f0e1d2SKirill A. Shutemov 		 *  - one from isolate_lru_page;
1792f3f0e1d2SKirill A. Shutemov 		 */
1793f3f0e1d2SKirill A. Shutemov 		if (!page_ref_freeze(page, 3)) {
1794f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
1795042a3082SHugh Dickins 			xas_unlock_irq(&xas);
1796042a3082SHugh Dickins 			putback_lru_page(page);
1797042a3082SHugh Dickins 			goto out_unlock;
1798f3f0e1d2SKirill A. Shutemov 		}
1799f3f0e1d2SKirill A. Shutemov 
1800f3f0e1d2SKirill A. Shutemov 		/*
1801f3f0e1d2SKirill A. Shutemov 		 * Add the page to the list to be able to undo the collapse if
1802f3f0e1d2SKirill A. Shutemov 		 * something goes wrong.
1803f3f0e1d2SKirill A. Shutemov 		 */
1804f3f0e1d2SKirill A. Shutemov 		list_add_tail(&page->lru, &pagelist);
1805f3f0e1d2SKirill A. Shutemov 
1806f3f0e1d2SKirill A. Shutemov 		/* Finally, replace with the new page. */
180750ad2f24SZach O'Keefe 		xas_store(&xas, hpage);
1808f3f0e1d2SKirill A. Shutemov 		continue;
1809f3f0e1d2SKirill A. Shutemov out_unlock:
1810f3f0e1d2SKirill A. Shutemov 		unlock_page(page);
1811f3f0e1d2SKirill A. Shutemov 		put_page(page);
1812042a3082SHugh Dickins 		goto xa_unlocked;
1813f3f0e1d2SKirill A. Shutemov 	}
181450ad2f24SZach O'Keefe 	nr = thp_nr_pages(hpage);
1815f3f0e1d2SKirill A. Shutemov 
181699cb0dbdSSong Liu 	if (is_shmem)
181750ad2f24SZach O'Keefe 		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
181809d91cdaSSong Liu 	else {
181950ad2f24SZach O'Keefe 		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
182009d91cdaSSong Liu 		filemap_nr_thps_inc(mapping);
1821eb6ecbedSCollin Fijalkovich 		/*
1822eb6ecbedSCollin Fijalkovich 		 * Paired with smp_mb() in do_dentry_open() to ensure
1823eb6ecbedSCollin Fijalkovich 		 * i_writecount is up to date and the update to nr_thps is
1824eb6ecbedSCollin Fijalkovich 		 * visible. Ensures the page cache will be truncated if the
1825eb6ecbedSCollin Fijalkovich 		 * file is opened writable.
1826eb6ecbedSCollin Fijalkovich 		 */
1827eb6ecbedSCollin Fijalkovich 		smp_mb();
1828eb6ecbedSCollin Fijalkovich 		if (inode_is_open_for_write(mapping->host)) {
1829eb6ecbedSCollin Fijalkovich 			result = SCAN_FAIL;
183050ad2f24SZach O'Keefe 			__mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
1831eb6ecbedSCollin Fijalkovich 			filemap_nr_thps_dec(mapping);
1832eb6ecbedSCollin Fijalkovich 			goto xa_locked;
1833eb6ecbedSCollin Fijalkovich 		}
183409d91cdaSSong Liu 	}
183599cb0dbdSSong Liu 
1836042a3082SHugh Dickins 	if (nr_none) {
183750ad2f24SZach O'Keefe 		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
18382f55f070SMiaohe Lin 		/* nr_none is always 0 for non-shmem. */
183950ad2f24SZach O'Keefe 		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
1840042a3082SHugh Dickins 	}
1841042a3082SHugh Dickins 
18426b24ca4aSMatthew Wilcox (Oracle) 	/* Join all the small entries into a single multi-index entry */
18436b24ca4aSMatthew Wilcox (Oracle) 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
184450ad2f24SZach O'Keefe 	xas_store(&xas, hpage);
1845042a3082SHugh Dickins xa_locked:
1846042a3082SHugh Dickins 	xas_unlock_irq(&xas);
184777da9389SMatthew Wilcox xa_unlocked:
1848042a3082SHugh Dickins 
18496d9df8a5SHugh Dickins 	/*
18506d9df8a5SHugh Dickins 	 * If collapse is successful, flush must be done now before copying.
18516d9df8a5SHugh Dickins 	 * If collapse is unsuccessful, does flush actually need to be done?
18526d9df8a5SHugh Dickins 	 * Do it anyway, to clear the state.
18536d9df8a5SHugh Dickins 	 */
18546d9df8a5SHugh Dickins 	try_to_unmap_flush();
18556d9df8a5SHugh Dickins 
1856f3f0e1d2SKirill A. Shutemov 	if (result == SCAN_SUCCEED) {
185777da9389SMatthew Wilcox 		struct page *page, *tmp;
1858f3f0e1d2SKirill A. Shutemov 
1859f3f0e1d2SKirill A. Shutemov 		/*
186077da9389SMatthew Wilcox 		 * Replacing the old pages with the new one has succeeded; now we
186177da9389SMatthew Wilcox 		 * need to copy the content and free the old pages.
1862f3f0e1d2SKirill A. Shutemov 		 */
18632af8ff29SHugh Dickins 		index = start;
1864f3f0e1d2SKirill A. Shutemov 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
18652af8ff29SHugh Dickins 			while (index < page->index) {
186650ad2f24SZach O'Keefe 				clear_highpage(hpage + (index % HPAGE_PMD_NR));
18672af8ff29SHugh Dickins 				index++;
18682af8ff29SHugh Dickins 			}
186950ad2f24SZach O'Keefe 			copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
1870f3f0e1d2SKirill A. Shutemov 				      page);
1871f3f0e1d2SKirill A. Shutemov 			list_del(&page->lru);
1872f3f0e1d2SKirill A. Shutemov 			page->mapping = NULL;
1873042a3082SHugh Dickins 			page_ref_unfreeze(page, 1);
1874f3f0e1d2SKirill A. Shutemov 			ClearPageActive(page);
1875f3f0e1d2SKirill A. Shutemov 			ClearPageUnevictable(page);
1876042a3082SHugh Dickins 			unlock_page(page);
1877f3f0e1d2SKirill A. Shutemov 			put_page(page);
18782af8ff29SHugh Dickins 			index++;
18792af8ff29SHugh Dickins 		}
18802af8ff29SHugh Dickins 		while (index < end) {
188150ad2f24SZach O'Keefe 			clear_highpage(hpage + (index % HPAGE_PMD_NR));
18822af8ff29SHugh Dickins 			index++;
1883f3f0e1d2SKirill A. Shutemov 		}
1884f3f0e1d2SKirill A. Shutemov 
188550ad2f24SZach O'Keefe 		SetPageUptodate(hpage);
188650ad2f24SZach O'Keefe 		page_ref_add(hpage, HPAGE_PMD_NR - 1);
18876058eaecSJohannes Weiner 		if (is_shmem)
188850ad2f24SZach O'Keefe 			set_page_dirty(hpage);
188950ad2f24SZach O'Keefe 		lru_cache_add(hpage);
1890f3f0e1d2SKirill A. Shutemov 
1891042a3082SHugh Dickins 		/*
1892042a3082SHugh Dickins 		 * Remove pte page tables, so we can re-fault the page as huge.
1893042a3082SHugh Dickins 		 */
1894042a3082SHugh Dickins 		retract_page_tables(mapping, start);
189550ad2f24SZach O'Keefe 		unlock_page(hpage);
189650ad2f24SZach O'Keefe 		hpage = NULL;
1897f3f0e1d2SKirill A. Shutemov 	} else {
189877da9389SMatthew Wilcox 		struct page *page;
1899aaa52e34SHugh Dickins 
190077da9389SMatthew Wilcox 		/* Something went wrong: roll back page cache changes */
190177da9389SMatthew Wilcox 		xas_lock_irq(&xas);
19022f55f070SMiaohe Lin 		if (nr_none) {
1903aaa52e34SHugh Dickins 			mapping->nrpages -= nr_none;
1904aaa52e34SHugh Dickins 			shmem_uncharge(mapping->host, nr_none);
19052f55f070SMiaohe Lin 		}
1906aaa52e34SHugh Dickins 
190777da9389SMatthew Wilcox 		xas_set(&xas, start);
190877da9389SMatthew Wilcox 		xas_for_each(&xas, page, end - 1) {
1909f3f0e1d2SKirill A. Shutemov 			page = list_first_entry_or_null(&pagelist,
1910f3f0e1d2SKirill A. Shutemov 					struct page, lru);
191177da9389SMatthew Wilcox 			if (!page || xas.xa_index < page->index) {
1912f3f0e1d2SKirill A. Shutemov 				if (!nr_none)
1913f3f0e1d2SKirill A. Shutemov 					break;
1914f3f0e1d2SKirill A. Shutemov 				nr_none--;
191559749e6cSJohannes Weiner 				/* Put holes back where they were */
191677da9389SMatthew Wilcox 				xas_store(&xas, NULL);
1917f3f0e1d2SKirill A. Shutemov 				continue;
1918f3f0e1d2SKirill A. Shutemov 			}
1919f3f0e1d2SKirill A. Shutemov 
192077da9389SMatthew Wilcox 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1921f3f0e1d2SKirill A. Shutemov 
1922f3f0e1d2SKirill A. Shutemov 			/* Unfreeze the page. */
1923f3f0e1d2SKirill A. Shutemov 			list_del(&page->lru);
1924f3f0e1d2SKirill A. Shutemov 			page_ref_unfreeze(page, 2);
192577da9389SMatthew Wilcox 			xas_store(&xas, page);
192677da9389SMatthew Wilcox 			xas_pause(&xas);
192777da9389SMatthew Wilcox 			xas_unlock_irq(&xas);
1928f3f0e1d2SKirill A. Shutemov 			unlock_page(page);
1929042a3082SHugh Dickins 			putback_lru_page(page);
193077da9389SMatthew Wilcox 			xas_lock_irq(&xas);
1931f3f0e1d2SKirill A. Shutemov 		}
1932f3f0e1d2SKirill A. Shutemov 		VM_BUG_ON(nr_none);
193377da9389SMatthew Wilcox 		xas_unlock_irq(&xas);
1934f3f0e1d2SKirill A. Shutemov 
193550ad2f24SZach O'Keefe 		hpage->mapping = NULL;
1936f3f0e1d2SKirill A. Shutemov 	}
1937042a3082SHugh Dickins 
193850ad2f24SZach O'Keefe 	if (hpage)
193950ad2f24SZach O'Keefe 		unlock_page(hpage);
1940f3f0e1d2SKirill A. Shutemov out:
1941f3f0e1d2SKirill A. Shutemov 	VM_BUG_ON(!list_empty(&pagelist));
194250ad2f24SZach O'Keefe 	if (hpage) {
194350ad2f24SZach O'Keefe 		mem_cgroup_uncharge(page_folio(hpage));
194450ad2f24SZach O'Keefe 		put_page(hpage);
1945c6a7f445SYang Shi 	}
1946f3f0e1d2SKirill A. Shutemov 	/* TODO: tracepoints */
194750ad2f24SZach O'Keefe 	return result;
1948f3f0e1d2SKirill A. Shutemov }
1949f3f0e1d2SKirill A. Shutemov 
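/*
 * Scan HPAGE_PMD_NR page cache slots starting at @start under RCU,
 * counting present pages and swap entries against the configured limits;
 * if the range looks collapsible, hand it off to collapse_file().
 */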
195050ad2f24SZach O'Keefe static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
195150ad2f24SZach O'Keefe 				pgoff_t start, struct collapse_control *cc)
1952f3f0e1d2SKirill A. Shutemov {
1953f3f0e1d2SKirill A. Shutemov 	struct page *page = NULL;
1954579c571eSSong Liu 	struct address_space *mapping = file->f_mapping;
195585b392dbSMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, start);
1956f3f0e1d2SKirill A. Shutemov 	int present, swap;
1957f3f0e1d2SKirill A. Shutemov 	int node = NUMA_NO_NODE;
1958f3f0e1d2SKirill A. Shutemov 	int result = SCAN_SUCCEED;
1959f3f0e1d2SKirill A. Shutemov 
1960f3f0e1d2SKirill A. Shutemov 	present = 0;
1961f3f0e1d2SKirill A. Shutemov 	swap = 0;
196234d6b470SZach O'Keefe 	memset(cc->node_load, 0, sizeof(cc->node_load));
1963f3f0e1d2SKirill A. Shutemov 	rcu_read_lock();
196485b392dbSMatthew Wilcox 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
196585b392dbSMatthew Wilcox 		if (xas_retry(&xas, page))
1966f3f0e1d2SKirill A. Shutemov 			continue;
1967f3f0e1d2SKirill A. Shutemov 
196885b392dbSMatthew Wilcox 		if (xa_is_value(page)) {
1969d8ea7cc8SZach O'Keefe 			++swap;
1970d8ea7cc8SZach O'Keefe 			if (cc->is_khugepaged &&
1971d8ea7cc8SZach O'Keefe 			    swap > khugepaged_max_ptes_swap) {
1972f3f0e1d2SKirill A. Shutemov 				result = SCAN_EXCEED_SWAP_PTE;
1973e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1974f3f0e1d2SKirill A. Shutemov 				break;
1975f3f0e1d2SKirill A. Shutemov 			}
1976f3f0e1d2SKirill A. Shutemov 			continue;
1977f3f0e1d2SKirill A. Shutemov 		}
1978f3f0e1d2SKirill A. Shutemov 
19796b24ca4aSMatthew Wilcox (Oracle) 		/*
19806b24ca4aSMatthew Wilcox (Oracle) 		 * XXX: khugepaged should compact smaller compound pages
19816b24ca4aSMatthew Wilcox (Oracle) 		 * into a PMD-sized page.
19826b24ca4aSMatthew Wilcox (Oracle) 		 */
1983f3f0e1d2SKirill A. Shutemov 		if (PageTransCompound(page)) {
1984f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COMPOUND;
1985f3f0e1d2SKirill A. Shutemov 			break;
1986f3f0e1d2SKirill A. Shutemov 		}
1987f3f0e1d2SKirill A. Shutemov 
1988f3f0e1d2SKirill A. Shutemov 		node = page_to_nid(page);
1989*7d2c4385SZach O'Keefe 		if (hpage_collapse_scan_abort(node, cc)) {
1990f3f0e1d2SKirill A. Shutemov 			result = SCAN_SCAN_ABORT;
1991f3f0e1d2SKirill A. Shutemov 			break;
1992f3f0e1d2SKirill A. Shutemov 		}
199334d6b470SZach O'Keefe 		cc->node_load[node]++;
1994f3f0e1d2SKirill A. Shutemov 
1995f3f0e1d2SKirill A. Shutemov 		if (!PageLRU(page)) {
1996f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_LRU;
1997f3f0e1d2SKirill A. Shutemov 			break;
1998f3f0e1d2SKirill A. Shutemov 		}
1999f3f0e1d2SKirill A. Shutemov 
200099cb0dbdSSong Liu 		if (page_count(page) !=
200199cb0dbdSSong Liu 		    1 + page_mapcount(page) + page_has_private(page)) {
2002f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
2003f3f0e1d2SKirill A. Shutemov 			break;
2004f3f0e1d2SKirill A. Shutemov 		}
2005f3f0e1d2SKirill A. Shutemov 
2006f3f0e1d2SKirill A. Shutemov 		/*
2007f3f0e1d2SKirill A. Shutemov 		 * We probably should check if the page is referenced here, but
2008f3f0e1d2SKirill A. Shutemov 		 * nobody would transfer pte_young() to PageReferenced() for us.
2009f3f0e1d2SKirill A. Shutemov 		 * And an rmap walk here is just too costly...
2010f3f0e1d2SKirill A. Shutemov 		 */
2011f3f0e1d2SKirill A. Shutemov 
2012f3f0e1d2SKirill A. Shutemov 		present++;
2013f3f0e1d2SKirill A. Shutemov 
2014f3f0e1d2SKirill A. Shutemov 		if (need_resched()) {
201585b392dbSMatthew Wilcox 			xas_pause(&xas);
2016f3f0e1d2SKirill A. Shutemov 			cond_resched_rcu();
2017f3f0e1d2SKirill A. Shutemov 		}
2018f3f0e1d2SKirill A. Shutemov 	}
2019f3f0e1d2SKirill A. Shutemov 	rcu_read_unlock();
2020f3f0e1d2SKirill A. Shutemov 
2021f3f0e1d2SKirill A. Shutemov 	if (result == SCAN_SUCCEED) {
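		/*
		 * For khugepaged, absent pages are counted against the
		 * none-PTE limit; other callers impose no such limit.
		 */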
2022d8ea7cc8SZach O'Keefe 		if (cc->is_khugepaged &&
2023d8ea7cc8SZach O'Keefe 		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2024f3f0e1d2SKirill A. Shutemov 			result = SCAN_EXCEED_NONE_PTE;
2025e9ea874aSYang Yang 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2026f3f0e1d2SKirill A. Shutemov 		} else {
202750ad2f24SZach O'Keefe 			result = collapse_file(mm, file, start, cc);
2028f3f0e1d2SKirill A. Shutemov 		}
2029f3f0e1d2SKirill A. Shutemov 	}
2030f3f0e1d2SKirill A. Shutemov 
2031f3f0e1d2SKirill A. Shutemov 	/* TODO: tracepoints */
203250ad2f24SZach O'Keefe 	return result;
2033f3f0e1d2SKirill A. Shutemov }
2034f3f0e1d2SKirill A. Shutemov #else
203550ad2f24SZach O'Keefe static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
203650ad2f24SZach O'Keefe 				pgoff_t start, struct collapse_control *cc)
2037f3f0e1d2SKirill A. Shutemov {
2038f3f0e1d2SKirill A. Shutemov 	BUILD_BUG();
2039f3f0e1d2SKirill A. Shutemov }
204027e1f827SSong Liu 
20410edf61e5SMiaohe Lin static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
204227e1f827SSong Liu {
204327e1f827SSong Liu }
2044f3f0e1d2SKirill A. Shutemov #endif
2045f3f0e1d2SKirill A. Shutemov 
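/*
 * Scan up to @pages PTEs across the registered mms, resuming from the
 * cursor kept in khugepaged_scan.  The last SCAN_* status is reported
 * through *result; the return value is the amount of progress made.
 */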
204650ad2f24SZach O'Keefe static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
204734d6b470SZach O'Keefe 					    struct collapse_control *cc)
2048b46e756fSKirill A. Shutemov 	__releases(&khugepaged_mm_lock)
2049b46e756fSKirill A. Shutemov 	__acquires(&khugepaged_mm_lock)
2050b46e756fSKirill A. Shutemov {
2051b46e756fSKirill A. Shutemov 	struct mm_slot *mm_slot;
2052b46e756fSKirill A. Shutemov 	struct mm_struct *mm;
2053b46e756fSKirill A. Shutemov 	struct vm_area_struct *vma;
2054b46e756fSKirill A. Shutemov 	int progress = 0;
2055b46e756fSKirill A. Shutemov 
2056b46e756fSKirill A. Shutemov 	VM_BUG_ON(!pages);
205735f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
205850ad2f24SZach O'Keefe 	*result = SCAN_FAIL;
2059b46e756fSKirill A. Shutemov 
2060b46e756fSKirill A. Shutemov 	if (khugepaged_scan.mm_slot)
2061b46e756fSKirill A. Shutemov 		mm_slot = khugepaged_scan.mm_slot;
2062b46e756fSKirill A. Shutemov 	else {
2063b46e756fSKirill A. Shutemov 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2064b46e756fSKirill A. Shutemov 				     struct mm_slot, mm_node);
2065b46e756fSKirill A. Shutemov 		khugepaged_scan.address = 0;
2066b46e756fSKirill A. Shutemov 		khugepaged_scan.mm_slot = mm_slot;
2067b46e756fSKirill A. Shutemov 	}
2068b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
206927e1f827SSong Liu 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2070b46e756fSKirill A. Shutemov 
2071b46e756fSKirill A. Shutemov 	mm = mm_slot->mm;
20723b454ad3SYang Shi 	/*
20733b454ad3SYang Shi 	 * Don't wait for the semaphore (to avoid long wait times).  Just move to
20743b454ad3SYang Shi 	 * the next mm on the list.
20753b454ad3SYang Shi 	 */
2076b46e756fSKirill A. Shutemov 	vma = NULL;
2077d8ed45c5SMichel Lespinasse 	if (unlikely(!mmap_read_trylock(mm)))
2078c1e8d7c6SMichel Lespinasse 		goto breakouterloop_mmap_lock;
2079*7d2c4385SZach O'Keefe 	if (likely(!hpage_collapse_test_exit(mm)))
2080b46e756fSKirill A. Shutemov 		vma = find_vma(mm, khugepaged_scan.address);
2081b46e756fSKirill A. Shutemov 
2082b46e756fSKirill A. Shutemov 	progress++;
2083b46e756fSKirill A. Shutemov 	for (; vma; vma = vma->vm_next) {
2084b46e756fSKirill A. Shutemov 		unsigned long hstart, hend;
2085b46e756fSKirill A. Shutemov 
2086b46e756fSKirill A. Shutemov 		cond_resched();
2087*7d2c4385SZach O'Keefe 		if (unlikely(hpage_collapse_test_exit(mm))) {
2088b46e756fSKirill A. Shutemov 			progress++;
2089b46e756fSKirill A. Shutemov 			break;
2090b46e756fSKirill A. Shutemov 		}
2091a7f4e6e4SZach O'Keefe 		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2092b46e756fSKirill A. Shutemov skip:
2093b46e756fSKirill A. Shutemov 			progress++;
2094b46e756fSKirill A. Shutemov 			continue;
2095b46e756fSKirill A. Shutemov 		}
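		/*
		 * Only PMD-aligned, PMD-sized ranges that lie fully inside
		 * the VMA are collapse candidates.
		 */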
20964fa6893fSYang Shi 		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
20974fa6893fSYang Shi 		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2098b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address > hend)
2099b46e756fSKirill A. Shutemov 			goto skip;
2100b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address < hstart)
2101b46e756fSKirill A. Shutemov 			khugepaged_scan.address = hstart;
2102b46e756fSKirill A. Shutemov 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2103b46e756fSKirill A. Shutemov 
2104b46e756fSKirill A. Shutemov 		while (khugepaged_scan.address < hend) {
210550ad2f24SZach O'Keefe 			bool mmap_locked = true;
210650ad2f24SZach O'Keefe 
2107b46e756fSKirill A. Shutemov 			cond_resched();
2108*7d2c4385SZach O'Keefe 			if (unlikely(hpage_collapse_test_exit(mm)))
2109b46e756fSKirill A. Shutemov 				goto breakouterloop;
2110b46e756fSKirill A. Shutemov 
2111b46e756fSKirill A. Shutemov 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2112b46e756fSKirill A. Shutemov 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2113b46e756fSKirill A. Shutemov 				  hend);
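			/*
			 * File-backed VMAs are scanned without the mmap_lock:
			 * take a reference on the file, drop the lock, and
			 * let khugepaged_scan_file() work on the page cache.
			 */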
211499cb0dbdSSong Liu 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2115396bcc52SMatthew Wilcox (Oracle) 				struct file *file = get_file(vma->vm_file);
2116f3f0e1d2SKirill A. Shutemov 				pgoff_t pgoff = linear_page_index(vma,
2117f3f0e1d2SKirill A. Shutemov 						khugepaged_scan.address);
211899cb0dbdSSong Liu 
2119d8ed45c5SMichel Lespinasse 				mmap_read_unlock(mm);
212050ad2f24SZach O'Keefe 				*result = khugepaged_scan_file(mm, file, pgoff,
212134d6b470SZach O'Keefe 							       cc);
212250ad2f24SZach O'Keefe 				mmap_locked = false;
2123f3f0e1d2SKirill A. Shutemov 				fput(file);
2124f3f0e1d2SKirill A. Shutemov 			} else {
2125*7d2c4385SZach O'Keefe 				*result = hpage_collapse_scan_pmd(mm, vma,
2126b46e756fSKirill A. Shutemov 								  khugepaged_scan.address,
2127*7d2c4385SZach O'Keefe 								  &mmap_locked,
2128*7d2c4385SZach O'Keefe 								  cc);
2129f3f0e1d2SKirill A. Shutemov 			}
213050ad2f24SZach O'Keefe 			if (*result == SCAN_SUCCEED)
213150ad2f24SZach O'Keefe 				++khugepaged_pages_collapsed;
2132b46e756fSKirill A. Shutemov 			/* move to next address */
2133b46e756fSKirill A. Shutemov 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2134b46e756fSKirill A. Shutemov 			progress += HPAGE_PMD_NR;
213550ad2f24SZach O'Keefe 			if (!mmap_locked)
213650ad2f24SZach O'Keefe 				/*
213750ad2f24SZach O'Keefe 				 * We released the mmap_lock, so break the loop.
213850ad2f24SZach O'Keefe 				 * Note that we drop the mmap_lock before all
213950ad2f24SZach O'Keefe 				 * hugepage allocations, so if allocation fails,
214050ad2f24SZach O'Keefe 				 * we are guaranteed to break here and report
214150ad2f24SZach O'Keefe 				 * the correct result back to the caller.
214250ad2f24SZach O'Keefe 				 */
2143c1e8d7c6SMichel Lespinasse 				goto breakouterloop_mmap_lock;
2144b46e756fSKirill A. Shutemov 			if (progress >= pages)
2145b46e756fSKirill A. Shutemov 				goto breakouterloop;
2146b46e756fSKirill A. Shutemov 		}
2147b46e756fSKirill A. Shutemov 	}
2148b46e756fSKirill A. Shutemov breakouterloop:
2149d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2150c1e8d7c6SMichel Lespinasse breakouterloop_mmap_lock:
2151b46e756fSKirill A. Shutemov 
2152b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2153b46e756fSKirill A. Shutemov 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2154b46e756fSKirill A. Shutemov 	/*
2155b46e756fSKirill A. Shutemov 	 * Release the current mm_slot if this mm is about to die, or
2156b46e756fSKirill A. Shutemov 	 * if we scanned all vmas of this mm.
2157b46e756fSKirill A. Shutemov 	 */
2158*7d2c4385SZach O'Keefe 	if (hpage_collapse_test_exit(mm) || !vma) {
2159b46e756fSKirill A. Shutemov 		/*
2160b46e756fSKirill A. Shutemov 		 * Make sure that if mm_users is reaching zero while
2161b46e756fSKirill A. Shutemov 		 * khugepaged runs here, khugepaged_exit will find
2162b46e756fSKirill A. Shutemov 		 * mm_slot not pointing to the exiting mm.
2163b46e756fSKirill A. Shutemov 		 */
2164b46e756fSKirill A. Shutemov 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2165b46e756fSKirill A. Shutemov 			khugepaged_scan.mm_slot = list_entry(
2166b46e756fSKirill A. Shutemov 				mm_slot->mm_node.next,
2167b46e756fSKirill A. Shutemov 				struct mm_slot, mm_node);
2168b46e756fSKirill A. Shutemov 			khugepaged_scan.address = 0;
2169b46e756fSKirill A. Shutemov 		} else {
2170b46e756fSKirill A. Shutemov 			khugepaged_scan.mm_slot = NULL;
2171b46e756fSKirill A. Shutemov 			khugepaged_full_scans++;
2172b46e756fSKirill A. Shutemov 		}
2173b46e756fSKirill A. Shutemov 
2174b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2175b46e756fSKirill A. Shutemov 	}
2176b46e756fSKirill A. Shutemov 
2177b46e756fSKirill A. Shutemov 	return progress;
2178b46e756fSKirill A. Shutemov }
2179b46e756fSKirill A. Shutemov 
2180b46e756fSKirill A. Shutemov static int khugepaged_has_work(void)
2181b46e756fSKirill A. Shutemov {
2182b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) &&
21831064026bSYang Shi 		hugepage_flags_enabled();
2184b46e756fSKirill A. Shutemov }
2185b46e756fSKirill A. Shutemov 
2186b46e756fSKirill A. Shutemov static int khugepaged_wait_event(void)
2187b46e756fSKirill A. Shutemov {
2188b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) ||
2189b46e756fSKirill A. Shutemov 		kthread_should_stop();
2190b46e756fSKirill A. Shutemov }
2191b46e756fSKirill A. Shutemov 
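/*
 * One scan pass: cover up to khugepaged_pages_to_scan ptes, restarting from
 * the head of the mm list at most twice.  On huge page allocation failure,
 * sleep once via khugepaged_alloc_sleep(); a second failure aborts the pass.
 */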
219234d6b470SZach O'Keefe static void khugepaged_do_scan(struct collapse_control *cc)
2193b46e756fSKirill A. Shutemov {
2194b46e756fSKirill A. Shutemov 	unsigned int progress = 0, pass_through_head = 0;
219589dc6a96SYanfei Xu 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2196b46e756fSKirill A. Shutemov 	bool wait = true;
219750ad2f24SZach O'Keefe 	int result = SCAN_SUCCEED;
2198b46e756fSKirill A. Shutemov 
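	/*
	 * Drain the per-CPU LRU caches first: pages still sitting in a
	 * per-CPU batch are not on the LRU and would fail isolation
	 * during collapse.
	 */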
2199a980df33SKirill A. Shutemov 	lru_add_drain_all();
2200a980df33SKirill A. Shutemov 
2201c6a7f445SYang Shi 	while (true) {
2202b46e756fSKirill A. Shutemov 		cond_resched();
2203b46e756fSKirill A. Shutemov 
2204b46e756fSKirill A. Shutemov 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2205b46e756fSKirill A. Shutemov 			break;
2206b46e756fSKirill A. Shutemov 
2207b46e756fSKirill A. Shutemov 		spin_lock(&khugepaged_mm_lock);
2208b46e756fSKirill A. Shutemov 		if (!khugepaged_scan.mm_slot)
2209b46e756fSKirill A. Shutemov 			pass_through_head++;
2210b46e756fSKirill A. Shutemov 		if (khugepaged_has_work() &&
2211b46e756fSKirill A. Shutemov 		    pass_through_head < 2)
2212b46e756fSKirill A. Shutemov 			progress += khugepaged_scan_mm_slot(pages - progress,
221350ad2f24SZach O'Keefe 							    &result, cc);
2214b46e756fSKirill A. Shutemov 		else
2215b46e756fSKirill A. Shutemov 			progress = pages;
2216b46e756fSKirill A. Shutemov 		spin_unlock(&khugepaged_mm_lock);
2217b46e756fSKirill A. Shutemov 
2218c6a7f445SYang Shi 		if (progress >= pages)
2219c6a7f445SYang Shi 			break;
2220c6a7f445SYang Shi 
222150ad2f24SZach O'Keefe 		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2222c6a7f445SYang Shi 			/*
2223c6a7f445SYang Shi 			 * If we fail to allocate the first time, sleep for
2224c6a7f445SYang Shi 			 * a while.  If we fail again, cancel the scan.
2225c6a7f445SYang Shi 			 */
2226c6a7f445SYang Shi 			if (!wait)
2227c6a7f445SYang Shi 				break;
2228c6a7f445SYang Shi 			wait = false;
2229c6a7f445SYang Shi 			khugepaged_alloc_sleep();
2230c6a7f445SYang Shi 		}
2231c6a7f445SYang Shi 	}
2232b46e756fSKirill A. Shutemov }
2233b46e756fSKirill A. Shutemov 
2234b46e756fSKirill A. Shutemov static bool khugepaged_should_wakeup(void)
2235b46e756fSKirill A. Shutemov {
2236b46e756fSKirill A. Shutemov 	return kthread_should_stop() ||
2237b46e756fSKirill A. Shutemov 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2238b46e756fSKirill A. Shutemov }
2239b46e756fSKirill A. Shutemov 
2240b46e756fSKirill A. Shutemov static void khugepaged_wait_work(void)
2241b46e756fSKirill A. Shutemov {
2242b46e756fSKirill A. Shutemov 	if (khugepaged_has_work()) {
2243b46e756fSKirill A. Shutemov 		const unsigned long scan_sleep_jiffies =
2244b46e756fSKirill A. Shutemov 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2245b46e756fSKirill A. Shutemov 
2246b46e756fSKirill A. Shutemov 		if (!scan_sleep_jiffies)
2247b46e756fSKirill A. Shutemov 			return;
2248b46e756fSKirill A. Shutemov 
2249b46e756fSKirill A. Shutemov 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2250b46e756fSKirill A. Shutemov 		wait_event_freezable_timeout(khugepaged_wait,
2251b46e756fSKirill A. Shutemov 					     khugepaged_should_wakeup(),
2252b46e756fSKirill A. Shutemov 					     scan_sleep_jiffies);
2253b46e756fSKirill A. Shutemov 		return;
2254b46e756fSKirill A. Shutemov 	}
2255b46e756fSKirill A. Shutemov 
22561064026bSYang Shi 	if (hugepage_flags_enabled())
2257b46e756fSKirill A. Shutemov 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2258b46e756fSKirill A. Shutemov }
2259b46e756fSKirill A. Shutemov 
2260b46e756fSKirill A. Shutemov static int khugepaged(void *none)
2261b46e756fSKirill A. Shutemov {
2262b46e756fSKirill A. Shutemov 	struct mm_slot *mm_slot;
2263b46e756fSKirill A. Shutemov 
2264b46e756fSKirill A. Shutemov 	set_freezable();
2265b46e756fSKirill A. Shutemov 	set_user_nice(current, MAX_NICE);
2266b46e756fSKirill A. Shutemov 
2267b46e756fSKirill A. Shutemov 	while (!kthread_should_stop()) {
226834d6b470SZach O'Keefe 		khugepaged_do_scan(&khugepaged_collapse_control);
2269b46e756fSKirill A. Shutemov 		khugepaged_wait_work();
2270b46e756fSKirill A. Shutemov 	}
2271b46e756fSKirill A. Shutemov 
2272b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2273b46e756fSKirill A. Shutemov 	mm_slot = khugepaged_scan.mm_slot;
2274b46e756fSKirill A. Shutemov 	khugepaged_scan.mm_slot = NULL;
2275b46e756fSKirill A. Shutemov 	if (mm_slot)
2276b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2277b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
2278b46e756fSKirill A. Shutemov 	return 0;
2279b46e756fSKirill A. Shutemov }
2280b46e756fSKirill A. Shutemov 
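/*
 * Raise min_free_kbytes so that roughly 2 + MIGRATE_PCPTYPES^2 = 11
 * pageblocks per populated zone stay free (assuming MIGRATE_PCPTYPES == 3),
 * easing defragmentation for hugepage allocations.  For example, assuming
 * x86-64 where a pageblock is 512 pages = 2MiB: one populated zone yields
 * 11 * 2MiB = 22MiB, capped at 5% of lowmem.
 */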
2281b46e756fSKirill A. Shutemov static void set_recommended_min_free_kbytes(void)
2282b46e756fSKirill A. Shutemov {
2283b46e756fSKirill A. Shutemov 	struct zone *zone;
2284b46e756fSKirill A. Shutemov 	int nr_zones = 0;
2285b46e756fSKirill A. Shutemov 	unsigned long recommended_min;
2286b46e756fSKirill A. Shutemov 
22871064026bSYang Shi 	if (!hugepage_flags_enabled()) {
2288bd3400eaSLiangcai Fan 		calculate_min_free_kbytes();
2289bd3400eaSLiangcai Fan 		goto update_wmarks;
2290bd3400eaSLiangcai Fan 	}
2291bd3400eaSLiangcai Fan 
2292b7d349c7SJoonsoo Kim 	for_each_populated_zone(zone) {
2293b7d349c7SJoonsoo Kim 		/*
2294b7d349c7SJoonsoo Kim 		 * We don't need to worry about fragmentation of
2295b7d349c7SJoonsoo Kim 		 * ZONE_MOVABLE since it only has movable pages.
2296b7d349c7SJoonsoo Kim 		 */
2297b7d349c7SJoonsoo Kim 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2298b7d349c7SJoonsoo Kim 			continue;
2299b7d349c7SJoonsoo Kim 
2300b46e756fSKirill A. Shutemov 		nr_zones++;
2301b7d349c7SJoonsoo Kim 	}
2302b46e756fSKirill A. Shutemov 
2303b46e756fSKirill A. Shutemov 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2304b46e756fSKirill A. Shutemov 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2305b46e756fSKirill A. Shutemov 
2306b46e756fSKirill A. Shutemov 	/*
2307b46e756fSKirill A. Shutemov 	 * Make sure that on average at least two pageblocks are almost free
2308b46e756fSKirill A. Shutemov 	 * of another type, one for a migratetype to fall back to and a
2309b46e756fSKirill A. Shutemov 	 * second to avoid subsequent fallbacks of other types.  There are 3
2310b46e756fSKirill A. Shutemov 	 * MIGRATE_TYPES we care about.
2311b46e756fSKirill A. Shutemov 	 */
2312b46e756fSKirill A. Shutemov 	recommended_min += pageblock_nr_pages * nr_zones *
2313b46e756fSKirill A. Shutemov 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2314b46e756fSKirill A. Shutemov 
2315b46e756fSKirill A. Shutemov 	/* never allow reserving more than 5% of lowmem */
2316b46e756fSKirill A. Shutemov 	recommended_min = min(recommended_min,
2317b46e756fSKirill A. Shutemov 			      (unsigned long) nr_free_buffer_pages() / 20);
2318b46e756fSKirill A. Shutemov 	recommended_min <<= (PAGE_SHIFT-10);
2319b46e756fSKirill A. Shutemov 
2320b46e756fSKirill A. Shutemov 	if (recommended_min > min_free_kbytes) {
2321b46e756fSKirill A. Shutemov 		if (user_min_free_kbytes >= 0)
2322b46e756fSKirill A. Shutemov 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2323b46e756fSKirill A. Shutemov 				min_free_kbytes, recommended_min);
2324b46e756fSKirill A. Shutemov 
2325b46e756fSKirill A. Shutemov 		min_free_kbytes = recommended_min;
2326b46e756fSKirill A. Shutemov 	}
2327bd3400eaSLiangcai Fan 
2328bd3400eaSLiangcai Fan update_wmarks:
2329b46e756fSKirill A. Shutemov 	setup_per_zone_wmarks();
2330b46e756fSKirill A. Shutemov }
2331b46e756fSKirill A. Shutemov 
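/*
 * Start or stop the khugepaged kthread to match the current
 * hugepage_flags_enabled() state, then recompute the watermark hints.
 * Serialized by khugepaged_mutex.
 */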
2332b46e756fSKirill A. Shutemov int start_stop_khugepaged(void)
2333b46e756fSKirill A. Shutemov {
2334b46e756fSKirill A. Shutemov 	int err = 0;
2335b46e756fSKirill A. Shutemov 
2336b46e756fSKirill A. Shutemov 	mutex_lock(&khugepaged_mutex);
23371064026bSYang Shi 	if (hugepage_flags_enabled()) {
2338b46e756fSKirill A. Shutemov 		if (!khugepaged_thread)
2339b46e756fSKirill A. Shutemov 			khugepaged_thread = kthread_run(khugepaged, NULL,
2340b46e756fSKirill A. Shutemov 							"khugepaged");
2341b46e756fSKirill A. Shutemov 		if (IS_ERR(khugepaged_thread)) {
2342b46e756fSKirill A. Shutemov 			pr_err("kthread_run(khugepaged) failed\n");
2343b46e756fSKirill A. Shutemov 			err = PTR_ERR(khugepaged_thread);
2344b46e756fSKirill A. Shutemov 			khugepaged_thread = NULL;
2345b46e756fSKirill A. Shutemov 			goto fail;
2346b46e756fSKirill A. Shutemov 		}
2347b46e756fSKirill A. Shutemov 
2348b46e756fSKirill A. Shutemov 		if (!list_empty(&khugepaged_scan.mm_head))
2349b46e756fSKirill A. Shutemov 			wake_up_interruptible(&khugepaged_wait);
2350b46e756fSKirill A. Shutemov 	} else if (khugepaged_thread) {
2351b46e756fSKirill A. Shutemov 		kthread_stop(khugepaged_thread);
2352b46e756fSKirill A. Shutemov 		khugepaged_thread = NULL;
2353b46e756fSKirill A. Shutemov 	}
2354bd3400eaSLiangcai Fan 	set_recommended_min_free_kbytes();
2355b46e756fSKirill A. Shutemov fail:
2356b46e756fSKirill A. Shutemov 	mutex_unlock(&khugepaged_mutex);
2357b46e756fSKirill A. Shutemov 	return err;
2358b46e756fSKirill A. Shutemov }
23594aab2be0SVijay Balakrishna 
23604aab2be0SVijay Balakrishna void khugepaged_min_free_kbytes_update(void)
23614aab2be0SVijay Balakrishna {
23624aab2be0SVijay Balakrishna 	mutex_lock(&khugepaged_mutex);
23631064026bSYang Shi 	if (hugepage_flags_enabled() && khugepaged_thread)
23644aab2be0SVijay Balakrishna 		set_recommended_min_free_kbytes();
23654aab2be0SVijay Balakrishna 	mutex_unlock(&khugepaged_mutex);
23664aab2be0SVijay Balakrishna }
23677d8faaf1SZach O'Keefe 
23687d8faaf1SZach O'Keefe static int madvise_collapse_errno(enum scan_result r)
23697d8faaf1SZach O'Keefe {
23707d8faaf1SZach O'Keefe 	/*
23717d8faaf1SZach O'Keefe 	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
23727d8faaf1SZach O'Keefe 	 * actionable feedback to the caller, so they may take an appropriate
23737d8faaf1SZach O'Keefe 	 * fallback measure depending on the nature of the failure.
23747d8faaf1SZach O'Keefe 	 */
23757d8faaf1SZach O'Keefe 	switch (r) {
23767d8faaf1SZach O'Keefe 	case SCAN_ALLOC_HUGE_PAGE_FAIL:
23777d8faaf1SZach O'Keefe 		return -ENOMEM;
23787d8faaf1SZach O'Keefe 	case SCAN_CGROUP_CHARGE_FAIL:
23797d8faaf1SZach O'Keefe 		return -EBUSY;
23807d8faaf1SZach O'Keefe 	/* Resource temporarily unavailable - trying again might succeed */
23817d8faaf1SZach O'Keefe 	case SCAN_PAGE_LOCK:
23827d8faaf1SZach O'Keefe 	case SCAN_PAGE_LRU:
23837d8faaf1SZach O'Keefe 		return -EAGAIN;
23847d8faaf1SZach O'Keefe 	/*
23857d8faaf1SZach O'Keefe 	 * Other: trying again is unlikely to succeed; the error is intrinsic
23867d8faaf1SZach O'Keefe 	 * to the specified memory range.  khugepaged likely won't be able to
23877d8faaf1SZach O'Keefe 	 * collapse it either.
23887d8faaf1SZach O'Keefe 	 */
23897d8faaf1SZach O'Keefe 	default:
23907d8faaf1SZach O'Keefe 		return -EINVAL;
23917d8faaf1SZach O'Keefe 	}
23927d8faaf1SZach O'Keefe }
23937d8faaf1SZach O'Keefe 
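/*
 * madvise(MADV_COLLAPSE) entry point: synchronously attempt to collapse
 * every fully-covered PMD range in [start, end) of an anonymous VMA.
 * Returns 0 only if every range is (or already was) backed by a PMD
 * mapping; otherwise returns the errno for the last failure.
 */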
23947d8faaf1SZach O'Keefe int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
23957d8faaf1SZach O'Keefe 		     unsigned long start, unsigned long end)
23967d8faaf1SZach O'Keefe {
23977d8faaf1SZach O'Keefe 	struct collapse_control *cc;
23987d8faaf1SZach O'Keefe 	struct mm_struct *mm = vma->vm_mm;
23997d8faaf1SZach O'Keefe 	unsigned long hstart, hend, addr;
24007d8faaf1SZach O'Keefe 	int thps = 0, last_fail = SCAN_FAIL;
24017d8faaf1SZach O'Keefe 	bool mmap_locked = true;
24027d8faaf1SZach O'Keefe 
24037d8faaf1SZach O'Keefe 	BUG_ON(vma->vm_start > start);
24047d8faaf1SZach O'Keefe 	BUG_ON(vma->vm_end < end);
24057d8faaf1SZach O'Keefe 
24067d8faaf1SZach O'Keefe 	*prev = vma;
24077d8faaf1SZach O'Keefe 
24087d8faaf1SZach O'Keefe 	/* TODO: Support file/shmem */
24097d8faaf1SZach O'Keefe 	if (!vma->anon_vma || !vma_is_anonymous(vma))
24107d8faaf1SZach O'Keefe 		return -EINVAL;
24117d8faaf1SZach O'Keefe 
24127d8faaf1SZach O'Keefe 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
24137d8faaf1SZach O'Keefe 		return -EINVAL;
24147d8faaf1SZach O'Keefe 
24157d8faaf1SZach O'Keefe 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
24167d8faaf1SZach O'Keefe 	if (!cc)
24177d8faaf1SZach O'Keefe 		return -ENOMEM;
24187d8faaf1SZach O'Keefe 	cc->is_khugepaged = false;
24197d8faaf1SZach O'Keefe 	cc->last_target_node = NUMA_NO_NODE;
24207d8faaf1SZach O'Keefe 
24217d8faaf1SZach O'Keefe 	mmgrab(mm);
24227d8faaf1SZach O'Keefe 	lru_add_drain_all();
24237d8faaf1SZach O'Keefe 
24247d8faaf1SZach O'Keefe 	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
24257d8faaf1SZach O'Keefe 	hend = end & HPAGE_PMD_MASK;
24267d8faaf1SZach O'Keefe 
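	/*
	 * Walk only the PMD-aligned, PMD-sized units fully contained in
	 * [start, end); partial head/tail ranges cannot be collapsed.
	 */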
24277d8faaf1SZach O'Keefe 	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
24287d8faaf1SZach O'Keefe 		int result = SCAN_FAIL;
24297d8faaf1SZach O'Keefe 
24307d8faaf1SZach O'Keefe 		if (!mmap_locked) {
24317d8faaf1SZach O'Keefe 			cond_resched();
24327d8faaf1SZach O'Keefe 			mmap_read_lock(mm);
24337d8faaf1SZach O'Keefe 			mmap_locked = true;
24347d8faaf1SZach O'Keefe 			result = hugepage_vma_revalidate(mm, addr, &vma, cc);
24357d8faaf1SZach O'Keefe 			if (result != SCAN_SUCCEED) {
24367d8faaf1SZach O'Keefe 				last_fail = result;
24377d8faaf1SZach O'Keefe 				goto out_nolock;
24387d8faaf1SZach O'Keefe 			}
24397d8faaf1SZach O'Keefe 		}
24407d8faaf1SZach O'Keefe 		mmap_assert_locked(mm);
24417d8faaf1SZach O'Keefe 		memset(cc->node_load, 0, sizeof(cc->node_load));
2442*7d2c4385SZach O'Keefe 		result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked,
2443*7d2c4385SZach O'Keefe 						 cc);
24447d8faaf1SZach O'Keefe 		if (!mmap_locked)
24457d8faaf1SZach O'Keefe 			*prev = NULL;  /* Tell caller we dropped mmap_lock */
24467d8faaf1SZach O'Keefe 
24477d8faaf1SZach O'Keefe 		switch (result) {
24487d8faaf1SZach O'Keefe 		case SCAN_SUCCEED:
24497d8faaf1SZach O'Keefe 		case SCAN_PMD_MAPPED:
24507d8faaf1SZach O'Keefe 			++thps;
24517d8faaf1SZach O'Keefe 			break;
24527d8faaf1SZach O'Keefe 		/* Whitelisted set of results where continuing is OK */
24537d8faaf1SZach O'Keefe 		case SCAN_PMD_NULL:
24547d8faaf1SZach O'Keefe 		case SCAN_PTE_NON_PRESENT:
24557d8faaf1SZach O'Keefe 		case SCAN_PTE_UFFD_WP:
24567d8faaf1SZach O'Keefe 		case SCAN_PAGE_RO:
24577d8faaf1SZach O'Keefe 		case SCAN_LACK_REFERENCED_PAGE:
24587d8faaf1SZach O'Keefe 		case SCAN_PAGE_NULL:
24597d8faaf1SZach O'Keefe 		case SCAN_PAGE_COUNT:
24607d8faaf1SZach O'Keefe 		case SCAN_PAGE_LOCK:
24617d8faaf1SZach O'Keefe 		case SCAN_PAGE_COMPOUND:
24627d8faaf1SZach O'Keefe 		case SCAN_PAGE_LRU:
24637d8faaf1SZach O'Keefe 			last_fail = result;
24647d8faaf1SZach O'Keefe 			break;
24657d8faaf1SZach O'Keefe 		default:
24667d8faaf1SZach O'Keefe 			last_fail = result;
24677d8faaf1SZach O'Keefe 			/* Other error, exit */
24687d8faaf1SZach O'Keefe 			goto out_maybelock;
24697d8faaf1SZach O'Keefe 		}
24707d8faaf1SZach O'Keefe 	}
24717d8faaf1SZach O'Keefe 
24727d8faaf1SZach O'Keefe out_maybelock:
24737d8faaf1SZach O'Keefe 	/* Caller expects us to hold mmap_lock on return */
24747d8faaf1SZach O'Keefe 	if (!mmap_locked)
24757d8faaf1SZach O'Keefe 		mmap_read_lock(mm);
24767d8faaf1SZach O'Keefe out_nolock:
24777d8faaf1SZach O'Keefe 	mmap_assert_locked(mm);
24787d8faaf1SZach O'Keefe 	mmdrop(mm);
24797d8faaf1SZach O'Keefe 	kfree(cc);
24807d8faaf1SZach O'Keefe 
24817d8faaf1SZach O'Keefe 	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
24827d8faaf1SZach O'Keefe 			: madvise_collapse_errno(last_fail);
24837d8faaf1SZach O'Keefe }
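/*
 * Userspace usage sketch (illustrative only, not part of this file): the
 * errno mapping above lets a caller distinguish transient from permanent
 * failures, e.g.:
 *
 *	#include <errno.h>
 *	#include <sys/mman.h>
 *
 *	if (madvise(addr, len, MADV_COLLAPSE) && errno == EAGAIN)
 *		madvise(addr, len, MADV_COLLAPSE);	// transient: retry once
 */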
2484