// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* by default, scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if at least one pte is mapped, just as
 * would have happened had the vma been large enough at page fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

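/* Maximum number of pte-mapped THP addresses an mm_slot can record for later collapse */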
#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: array of addresses of pte-mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err  = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err  = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma_merge(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

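/*
 * True once all users of the mm are gone (the address space is being
 * torn down), so khugepaged must not touch its page tables any more.
 */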
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

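/*
 * Decide whether khugepaged may collapse pages in this vma: THP must be
 * enabled for it, and special mappings, DAX vmas, unaligned file
 * mappings and temporary stacks are all excluded.
 */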
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/* Don't run khugepaged against DAX vmas */
	if (vma_is_dax(vma))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular files are valid */
	if (file_thp_enabled(vma))
		return true;

	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return false;
	if (vma_is_temporary_stack(vma))
		return false;

	return true;
}

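/*
 * Register an mm with khugepaged: allocate an mm_slot, add it to the
 * hash and to the tail of the scan list, and wake the daemon if the
 * list was previously empty.
 */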
void __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * For non-shmem files, khugepaged only supports read-only
	 * mappings. khugepaged does not yet work on special mappings,
	 * and file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		khugepaged_enter(vma, vm_flags);
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap read lock). Wait here (all page tables
		 * will be destroyed after we return) until khugepaged
		 * has finished working on the page tables under the
		 * mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

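/*
 * A page is only collapsible if its mapcount (plus one swap cache
 * reference per subpage, if any) fully accounts for its refcount; any
 * extra reference means a GUP or other external pin.
 */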
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

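/*
 * Walk the HPAGE_PMD_NR ptes starting at @address, validating each
 * mapped page and isolating it from the LRU; on failure, every page
 * isolated so far is put back via release_pte_pages().
 */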
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

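/*
 * Copy the contents of the isolated small pages into the new huge page,
 * clearing the old ptes and dropping the old pages' references as we
 * go; none/zero ptes just get the destination subpage cleared.
 */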
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

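/*
 * With node reclaim enabled, abort the scan if collapsing would pull
 * together pages from NUMA nodes that are further apart than
 * node_reclaim_distance.
 */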
static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
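/*
 * Pick the node that contributed the most pages in the last scan as the
 * allocation target, round-robining between nodes that tie.
 */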
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return  *hpage;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma before
 * taking the mmap_lock again.
 * Return 0 on success, otherwise return a non-zero scan result code.
 */
952b46e756fSKirill A. Shutemov 
953c131f751SKirill A. Shutemov static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
954c131f751SKirill A. Shutemov 		struct vm_area_struct **vmap)
955b46e756fSKirill A. Shutemov {
956b46e756fSKirill A. Shutemov 	struct vm_area_struct *vma;
957b46e756fSKirill A. Shutemov 	unsigned long hstart, hend;
958b46e756fSKirill A. Shutemov 
959b46e756fSKirill A. Shutemov 	if (unlikely(khugepaged_test_exit(mm)))
960b46e756fSKirill A. Shutemov 		return SCAN_ANY_PROCESS;
961b46e756fSKirill A. Shutemov 
962c131f751SKirill A. Shutemov 	*vmap = vma = find_vma(mm, address);
963b46e756fSKirill A. Shutemov 	if (!vma)
964b46e756fSKirill A. Shutemov 		return SCAN_VMA_NULL;
965b46e756fSKirill A. Shutemov 
966b46e756fSKirill A. Shutemov 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
967b46e756fSKirill A. Shutemov 	hend = vma->vm_end & HPAGE_PMD_MASK;
968b46e756fSKirill A. Shutemov 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
969b46e756fSKirill A. Shutemov 		return SCAN_ADDRESS_RANGE;
97050f8b92fSSong Liu 	if (!hugepage_vma_check(vma, vma->vm_flags))
971b46e756fSKirill A. Shutemov 		return SCAN_VMA_CHECK;
972594cced1SKirill A. Shutemov 	/* Anon VMA expected */
97325fa414aSxu xin 	if (!vma->anon_vma || !vma_is_anonymous(vma))
974594cced1SKirill A. Shutemov 		return SCAN_VMA_CHECK;
975b46e756fSKirill A. Shutemov 	return 0;
976b46e756fSKirill A. Shutemov }
977b46e756fSKirill A. Shutemov 
978b46e756fSKirill A. Shutemov /*
979b46e756fSKirill A. Shutemov  * Bring missing pages in from swap, to complete THP collapse.
980b46e756fSKirill A. Shutemov  * Only done if khugepaged_scan_pmd believes it is worthwhile.
981b46e756fSKirill A. Shutemov  *
982b46e756fSKirill A. Shutemov  * Called and returns without pte mapped or spinlocks held,
983c1e8d7c6SMichel Lespinasse  * but with mmap_lock held to protect against vma changes.
984b46e756fSKirill A. Shutemov  */
985b46e756fSKirill A. Shutemov 
986b46e756fSKirill A. Shutemov static bool __collapse_huge_page_swapin(struct mm_struct *mm,
987b46e756fSKirill A. Shutemov 					struct vm_area_struct *vma,
9882b635dd3SWill Deacon 					unsigned long haddr, pmd_t *pmd,
9890db501f7SEbru Akagunduz 					int referenced)
990b46e756fSKirill A. Shutemov {
9912b740303SSouptick Joarder 	int swapped_in = 0;
9922b740303SSouptick Joarder 	vm_fault_t ret = 0;
9932b635dd3SWill Deacon 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
9942b635dd3SWill Deacon 
9952b635dd3SWill Deacon 	for (address = haddr; address < end; address += PAGE_SIZE) {
99682b0f8c3SJan Kara 		struct vm_fault vmf = {
997b46e756fSKirill A. Shutemov 			.vma = vma,
998b46e756fSKirill A. Shutemov 			.address = address,
9992b635dd3SWill Deacon 			.pgoff = linear_page_index(vma, haddr),
1000b46e756fSKirill A. Shutemov 			.flags = FAULT_FLAG_ALLOW_RETRY,
1001b46e756fSKirill A. Shutemov 			.pmd = pmd,
1002b46e756fSKirill A. Shutemov 		};
1003b46e756fSKirill A. Shutemov 
100482b0f8c3SJan Kara 		vmf.pte = pte_offset_map(pmd, address);
10052994302bSJan Kara 		vmf.orig_pte = *vmf.pte;
10062b635dd3SWill Deacon 		if (!is_swap_pte(vmf.orig_pte)) {
10072b635dd3SWill Deacon 			pte_unmap(vmf.pte);
1008b46e756fSKirill A. Shutemov 			continue;
10092b635dd3SWill Deacon 		}
1010b46e756fSKirill A. Shutemov 		swapped_in++;
10112994302bSJan Kara 		ret = do_swap_page(&vmf);
10120db501f7SEbru Akagunduz 
1013c1e8d7c6SMichel Lespinasse 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1014b46e756fSKirill A. Shutemov 		if (ret & VM_FAULT_RETRY) {
1015d8ed45c5SMichel Lespinasse 			mmap_read_lock(mm);
10162b635dd3SWill Deacon 			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
1017b46e756fSKirill A. Shutemov 				/* vma is no longer available, don't continue to swapin */
10180db501f7SEbru Akagunduz 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1019b46e756fSKirill A. Shutemov 				return false;
102047f863eaSEbru Akagunduz 			}
1021b46e756fSKirill A. Shutemov 			/* check if the pmd is still valid */
10222b635dd3SWill Deacon 			if (mm_find_pmd(mm, haddr) != pmd) {
1023835152a2SSeongJae Park 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1024b46e756fSKirill A. Shutemov 				return false;
1025b46e756fSKirill A. Shutemov 			}
1026835152a2SSeongJae Park 		}
1027b46e756fSKirill A. Shutemov 		if (ret & VM_FAULT_ERROR) {
10280db501f7SEbru Akagunduz 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1029b46e756fSKirill A. Shutemov 			return false;
1030b46e756fSKirill A. Shutemov 		}
1031b46e756fSKirill A. Shutemov 	}
1032ae2c5d80SKirill A. Shutemov 
1033ae2c5d80SKirill A. Shutemov 	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1034ae2c5d80SKirill A. Shutemov 	if (swapped_in)
1035ae2c5d80SKirill A. Shutemov 		lru_add_drain();
1036ae2c5d80SKirill A. Shutemov 
10370db501f7SEbru Akagunduz 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1038b46e756fSKirill A. Shutemov 	return true;
1039b46e756fSKirill A. Shutemov }
1040b46e756fSKirill A. Shutemov 
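/*
 * collapse_huge_page - collapse one PMD-aligned anonymous range into a
 * huge page.  The huge page is allocated without mmap_lock held; the vma
 * and pmd are then revalidated under the read lock (and again under the
 * write lock) before the small pages are isolated, copied and replaced
 * by a single huge PMD.
 */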
1041b46e756fSKirill A. Shutemov static void collapse_huge_page(struct mm_struct *mm,
1042b46e756fSKirill A. Shutemov 				   unsigned long address,
1043b46e756fSKirill A. Shutemov 				   struct page **hpage,
1044ffe945e6SKirill A. Shutemov 				   int node, int referenced, int unmapped)
1045b46e756fSKirill A. Shutemov {
10465503fbf2SKirill A. Shutemov 	LIST_HEAD(compound_pagelist);
1047b46e756fSKirill A. Shutemov 	pmd_t *pmd, _pmd;
1048b46e756fSKirill A. Shutemov 	pte_t *pte;
1049b46e756fSKirill A. Shutemov 	pgtable_t pgtable;
1050b46e756fSKirill A. Shutemov 	struct page *new_page;
1051b46e756fSKirill A. Shutemov 	spinlock_t *pmd_ptl, *pte_ptl;
1052b46e756fSKirill A. Shutemov 	int isolated = 0, result = 0;
1053c131f751SKirill A. Shutemov 	struct vm_area_struct *vma;
1054ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
1055b46e756fSKirill A. Shutemov 	gfp_t gfp;
1056b46e756fSKirill A. Shutemov 
1057b46e756fSKirill A. Shutemov 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1058b46e756fSKirill A. Shutemov 
1059b46e756fSKirill A. Shutemov 	/* Only allocate from the target node */
106041b6167eSMichal Hocko 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1061b46e756fSKirill A. Shutemov 
1062988ddb71SKirill A. Shutemov 	/*
1063c1e8d7c6SMichel Lespinasse 	 * Before allocating the hugepage, release the mmap_lock read lock.
1064988ddb71SKirill A. Shutemov 	 * The allocation can take potentially a long time if it involves
1065c1e8d7c6SMichel Lespinasse 	 * sync compaction, and we do not need to hold the mmap_lock during
1066988ddb71SKirill A. Shutemov 	 * that. We will recheck the vma after taking it again in write mode.
1067988ddb71SKirill A. Shutemov 	 */
1068d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1069988ddb71SKirill A. Shutemov 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1070b46e756fSKirill A. Shutemov 	if (!new_page) {
1071b46e756fSKirill A. Shutemov 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1072b46e756fSKirill A. Shutemov 		goto out_nolock;
1073b46e756fSKirill A. Shutemov 	}
1074b46e756fSKirill A. Shutemov 
10758f425e4eSMatthew Wilcox (Oracle) 	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
1076b46e756fSKirill A. Shutemov 		result = SCAN_CGROUP_CHARGE_FAIL;
1077b46e756fSKirill A. Shutemov 		goto out_nolock;
1078b46e756fSKirill A. Shutemov 	}
10799d82c694SJohannes Weiner 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1080b46e756fSKirill A. Shutemov 
1081d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1082c131f751SKirill A. Shutemov 	result = hugepage_vma_revalidate(mm, address, &vma);
1083b46e756fSKirill A. Shutemov 	if (result) {
1084d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1085b46e756fSKirill A. Shutemov 		goto out_nolock;
1086b46e756fSKirill A. Shutemov 	}
1087b46e756fSKirill A. Shutemov 
1088b46e756fSKirill A. Shutemov 	pmd = mm_find_pmd(mm, address);
1089b46e756fSKirill A. Shutemov 	if (!pmd) {
1090b46e756fSKirill A. Shutemov 		result = SCAN_PMD_NULL;
1091d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1092b46e756fSKirill A. Shutemov 		goto out_nolock;
1093b46e756fSKirill A. Shutemov 	}
1094b46e756fSKirill A. Shutemov 
1095b46e756fSKirill A. Shutemov 	/*
1096c1e8d7c6SMichel Lespinasse 	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1097c1e8d7c6SMichel Lespinasse 	 * If it fails, we release mmap_lock and jump to out_nolock;
1098b46e756fSKirill A. Shutemov 	 * continuing with the collapse would cause inconsistency.
1099b46e756fSKirill A. Shutemov 	 */
1100ffe945e6SKirill A. Shutemov 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1101ffe945e6SKirill A. Shutemov 						     pmd, referenced)) {
1102d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
1103b46e756fSKirill A. Shutemov 		goto out_nolock;
1104b46e756fSKirill A. Shutemov 	}
1105b46e756fSKirill A. Shutemov 
1106d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1107b46e756fSKirill A. Shutemov 	/*
1108b46e756fSKirill A. Shutemov 	 * Prevent all access to the page tables: gup_fast is handled
1109b46e756fSKirill A. Shutemov 	 * later by the pmdp_collapse_flush below, and regular VM access
1110b46e756fSKirill A. Shutemov 	 * is handled by the anon_vma lock + PG_lock.
1111b46e756fSKirill A. Shutemov 	 */
1112d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
1113c131f751SKirill A. Shutemov 	result = hugepage_vma_revalidate(mm, address, &vma);
1114b46e756fSKirill A. Shutemov 	if (result)
111518d24a7cSMiaohe Lin 		goto out_up_write;
1116b46e756fSKirill A. Shutemov 	/* check if the pmd is still valid */
1117b46e756fSKirill A. Shutemov 	if (mm_find_pmd(mm, address) != pmd)
111818d24a7cSMiaohe Lin 		goto out_up_write;
1119b46e756fSKirill A. Shutemov 
1120b46e756fSKirill A. Shutemov 	anon_vma_lock_write(vma->anon_vma);
1121b46e756fSKirill A. Shutemov 
11227269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
11236f4f13e8SJérôme Glisse 				address, address + HPAGE_PMD_SIZE);
1124ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
1125ec649c9dSVille Syrjälä 
1126ec649c9dSVille Syrjälä 	pte = pte_offset_map(pmd, address);
1127ec649c9dSVille Syrjälä 	pte_ptl = pte_lockptr(mm, pmd);
1128ec649c9dSVille Syrjälä 
1129b46e756fSKirill A. Shutemov 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1130b46e756fSKirill A. Shutemov 	/*
1131b46e756fSKirill A. Shutemov 	 * After this gup_fast can't run anymore. This also removes
1132b46e756fSKirill A. Shutemov 	 * any huge TLB entry from the CPU so we won't allow
1133b46e756fSKirill A. Shutemov 	 * huge and small TLB entries for the same virtual address
1134b46e756fSKirill A. Shutemov 	 * to avoid the risk of CPU bugs in that area.
1135b46e756fSKirill A. Shutemov 	 */
1136b46e756fSKirill A. Shutemov 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1137b46e756fSKirill A. Shutemov 	spin_unlock(pmd_ptl);
1138ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
1139b46e756fSKirill A. Shutemov 
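	/*
	 * Isolate and lock every small page under the PTE lock.  If any page
	 * cannot be isolated, the original page table is reinstalled below
	 * and the collapse is abandoned.
	 */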
1140b46e756fSKirill A. Shutemov 	spin_lock(pte_ptl);
11415503fbf2SKirill A. Shutemov 	isolated = __collapse_huge_page_isolate(vma, address, pte,
11425503fbf2SKirill A. Shutemov 			&compound_pagelist);
1143b46e756fSKirill A. Shutemov 	spin_unlock(pte_ptl);
1144b46e756fSKirill A. Shutemov 
1145b46e756fSKirill A. Shutemov 	if (unlikely(!isolated)) {
1146b46e756fSKirill A. Shutemov 		pte_unmap(pte);
1147b46e756fSKirill A. Shutemov 		spin_lock(pmd_ptl);
1148b46e756fSKirill A. Shutemov 		BUG_ON(!pmd_none(*pmd));
1149b46e756fSKirill A. Shutemov 		/*
1150b46e756fSKirill A. Shutemov 		 * We can only use set_pmd_at when establishing
1151b46e756fSKirill A. Shutemov 		 * hugepmds and never for establishing regular pmds that
1152b46e756fSKirill A. Shutemov 		 * point to regular page tables. Use pmd_populate for that.
1153b46e756fSKirill A. Shutemov 		 */
1154b46e756fSKirill A. Shutemov 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1155b46e756fSKirill A. Shutemov 		spin_unlock(pmd_ptl);
1156b46e756fSKirill A. Shutemov 		anon_vma_unlock_write(vma->anon_vma);
1157b46e756fSKirill A. Shutemov 		result = SCAN_FAIL;
115818d24a7cSMiaohe Lin 		goto out_up_write;
1159b46e756fSKirill A. Shutemov 	}
1160b46e756fSKirill A. Shutemov 
1161b46e756fSKirill A. Shutemov 	/*
1162b46e756fSKirill A. Shutemov 	 * All pages are isolated and locked so anon_vma rmap
1163b46e756fSKirill A. Shutemov 	 * can't run anymore.
1164b46e756fSKirill A. Shutemov 	 */
1165b46e756fSKirill A. Shutemov 	anon_vma_unlock_write(vma->anon_vma);
1166b46e756fSKirill A. Shutemov 
11675503fbf2SKirill A. Shutemov 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
11685503fbf2SKirill A. Shutemov 			&compound_pagelist);
1169b46e756fSKirill A. Shutemov 	pte_unmap(pte);
1170588d01f9SMiaohe Lin 	/*
1171588d01f9SMiaohe Lin 	 * spin_lock() below is not the equivalent of smp_wmb(), but
1172588d01f9SMiaohe Lin 	 * the smp_wmb() inside __SetPageUptodate() can be reused to
1173588d01f9SMiaohe Lin 	 * ensure the copy_huge_page writes do not become visible after
1174588d01f9SMiaohe Lin 	 * the set_pmd_at() write.
1175588d01f9SMiaohe Lin 	 */
1176b46e756fSKirill A. Shutemov 	__SetPageUptodate(new_page);
1177b46e756fSKirill A. Shutemov 	pgtable = pmd_pgtable(_pmd);
1178b46e756fSKirill A. Shutemov 
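	/*
	 * Build the huge PMD entry (dirty, and writable where the vma allows),
	 * then, under the PMD lock, add the anon rmap, put the new page on the
	 * LRU, deposit the old page table for a later split and install the
	 * huge mapping.
	 */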
1179b46e756fSKirill A. Shutemov 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1180f55e1014SLinus Torvalds 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1181b46e756fSKirill A. Shutemov 
1182b46e756fSKirill A. Shutemov 	spin_lock(pmd_ptl);
1183b46e756fSKirill A. Shutemov 	BUG_ON(!pmd_none(*pmd));
118440f2bbf7SDavid Hildenbrand 	page_add_new_anon_rmap(new_page, vma, address);
1185b518154eSJoonsoo Kim 	lru_cache_add_inactive_or_unevictable(new_page, vma);
1186b46e756fSKirill A. Shutemov 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1187b46e756fSKirill A. Shutemov 	set_pmd_at(mm, address, pmd, _pmd);
1188b46e756fSKirill A. Shutemov 	update_mmu_cache_pmd(vma, address, pmd);
1189b46e756fSKirill A. Shutemov 	spin_unlock(pmd_ptl);
1190b46e756fSKirill A. Shutemov 
1191b46e756fSKirill A. Shutemov 	*hpage = NULL;
1192b46e756fSKirill A. Shutemov 
1193b46e756fSKirill A. Shutemov 	khugepaged_pages_collapsed++;
1194b46e756fSKirill A. Shutemov 	result = SCAN_SUCCEED;
1195b46e756fSKirill A. Shutemov out_up_write:
1196d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1197b46e756fSKirill A. Shutemov out_nolock:
11989d82c694SJohannes Weiner 	if (!IS_ERR_OR_NULL(*hpage))
1199bbc6b703SMatthew Wilcox (Oracle) 		mem_cgroup_uncharge(page_folio(*hpage));
1200b46e756fSKirill A. Shutemov 	trace_mm_collapse_huge_page(mm, isolated, result);
1201b46e756fSKirill A. Shutemov 	return;
1202b46e756fSKirill A. Shutemov }
1203b46e756fSKirill A. Shutemov 
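/*
 * khugepaged_scan_pmd - scan one PMD-sized anonymous range and decide
 * whether it is worth collapsing.  Returns 1 after calling
 * collapse_huge_page() (which releases mmap_lock) when the range
 * qualifies, 0 otherwise.
 */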
1204b46e756fSKirill A. Shutemov static int khugepaged_scan_pmd(struct mm_struct *mm,
1205b46e756fSKirill A. Shutemov 			       struct vm_area_struct *vma,
1206b46e756fSKirill A. Shutemov 			       unsigned long address,
1207b46e756fSKirill A. Shutemov 			       struct page **hpage)
1208b46e756fSKirill A. Shutemov {
1209b46e756fSKirill A. Shutemov 	pmd_t *pmd;
1210b46e756fSKirill A. Shutemov 	pte_t *pte, *_pte;
121171a2c112SKirill A. Shutemov 	int ret = 0, result = 0, referenced = 0;
121271a2c112SKirill A. Shutemov 	int none_or_zero = 0, shared = 0;
1213b46e756fSKirill A. Shutemov 	struct page *page = NULL;
1214b46e756fSKirill A. Shutemov 	unsigned long _address;
1215b46e756fSKirill A. Shutemov 	spinlock_t *ptl;
1216b46e756fSKirill A. Shutemov 	int node = NUMA_NO_NODE, unmapped = 0;
12170db501f7SEbru Akagunduz 	bool writable = false;
1218b46e756fSKirill A. Shutemov 
1219b46e756fSKirill A. Shutemov 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1220b46e756fSKirill A. Shutemov 
1221b46e756fSKirill A. Shutemov 	pmd = mm_find_pmd(mm, address);
1222b46e756fSKirill A. Shutemov 	if (!pmd) {
1223b46e756fSKirill A. Shutemov 		result = SCAN_PMD_NULL;
1224b46e756fSKirill A. Shutemov 		goto out;
1225b46e756fSKirill A. Shutemov 	}
1226b46e756fSKirill A. Shutemov 
1227b46e756fSKirill A. Shutemov 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1228b46e756fSKirill A. Shutemov 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1229b46e756fSKirill A. Shutemov 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1230b46e756fSKirill A. Shutemov 	     _pte++, _address += PAGE_SIZE) {
1231b46e756fSKirill A. Shutemov 		pte_t pteval = *_pte;
1232b46e756fSKirill A. Shutemov 		if (is_swap_pte(pteval)) {
1233b46e756fSKirill A. Shutemov 			if (++unmapped <= khugepaged_max_ptes_swap) {
1234e1e267c7SPeter Xu 				/*
1235e1e267c7SPeter Xu 				 * Always be strict with uffd-wp
1236e1e267c7SPeter Xu 				 * enabled swap entries.  Please see
1237e1e267c7SPeter Xu 				 * comment below for pte_uffd_wp().
1238e1e267c7SPeter Xu 				 */
1239e1e267c7SPeter Xu 				if (pte_swp_uffd_wp(pteval)) {
1240e1e267c7SPeter Xu 					result = SCAN_PTE_UFFD_WP;
1241e1e267c7SPeter Xu 					goto out_unmap;
1242e1e267c7SPeter Xu 				}
1243b46e756fSKirill A. Shutemov 				continue;
1244b46e756fSKirill A. Shutemov 			} else {
1245b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_SWAP_PTE;
1246e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1247b46e756fSKirill A. Shutemov 				goto out_unmap;
1248b46e756fSKirill A. Shutemov 			}
1249b46e756fSKirill A. Shutemov 		}
1250b46e756fSKirill A. Shutemov 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1251b46e756fSKirill A. Shutemov 			if (!userfaultfd_armed(vma) &&
1252b46e756fSKirill A. Shutemov 			    ++none_or_zero <= khugepaged_max_ptes_none) {
1253b46e756fSKirill A. Shutemov 				continue;
1254b46e756fSKirill A. Shutemov 			} else {
1255b46e756fSKirill A. Shutemov 				result = SCAN_EXCEED_NONE_PTE;
1256e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1257b46e756fSKirill A. Shutemov 				goto out_unmap;
1258b46e756fSKirill A. Shutemov 			}
1259b46e756fSKirill A. Shutemov 		}
1260e1e267c7SPeter Xu 		if (pte_uffd_wp(pteval)) {
1261e1e267c7SPeter Xu 			/*
1262e1e267c7SPeter Xu 			 * Don't collapse the page if any of the small
1263e1e267c7SPeter Xu 			 * PTEs are armed with uffd write protection.
1264e1e267c7SPeter Xu 			 * Here we can also mark the new huge pmd as
1265e1e267c7SPeter Xu 			 * write protected if any of the small ones is
12668958b249SHaitao Shi 			 * marked, but that could deliver unexpected
1267e1e267c7SPeter Xu 			 * userfault messages that fall outside of
1268e1e267c7SPeter Xu 			 * the registered range.  So, just keep it simple.
1269e1e267c7SPeter Xu 			 */
1270e1e267c7SPeter Xu 			result = SCAN_PTE_UFFD_WP;
1271e1e267c7SPeter Xu 			goto out_unmap;
1272e1e267c7SPeter Xu 		}
1273b46e756fSKirill A. Shutemov 		if (pte_write(pteval))
1274b46e756fSKirill A. Shutemov 			writable = true;
1275b46e756fSKirill A. Shutemov 
1276b46e756fSKirill A. Shutemov 		page = vm_normal_page(vma, _address, pteval);
1277b46e756fSKirill A. Shutemov 		if (unlikely(!page)) {
1278b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_NULL;
1279b46e756fSKirill A. Shutemov 			goto out_unmap;
1280b46e756fSKirill A. Shutemov 		}
1281b46e756fSKirill A. Shutemov 
128271a2c112SKirill A. Shutemov 		if (page_mapcount(page) > 1 &&
128371a2c112SKirill A. Shutemov 				++shared > khugepaged_max_ptes_shared) {
128471a2c112SKirill A. Shutemov 			result = SCAN_EXCEED_SHARED_PTE;
1285e9ea874aSYang Yang 			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
128671a2c112SKirill A. Shutemov 			goto out_unmap;
128771a2c112SKirill A. Shutemov 		}
128871a2c112SKirill A. Shutemov 
12895503fbf2SKirill A. Shutemov 		page = compound_head(page);
1290b46e756fSKirill A. Shutemov 
1291b46e756fSKirill A. Shutemov 		/*
1292b46e756fSKirill A. Shutemov 		 * Record which node the original page is from and save this
1293b46e756fSKirill A. Shutemov 		 * information to khugepaged_node_load[].
12940b8f0d87SQuanfa Fu 		 * Khugepaged will allocate the hugepage from the node with
1295b46e756fSKirill A. Shutemov 		 * the highest hit count.
1296b46e756fSKirill A. Shutemov 		 */
1297b46e756fSKirill A. Shutemov 		node = page_to_nid(page);
1298b46e756fSKirill A. Shutemov 		if (khugepaged_scan_abort(node)) {
1299b46e756fSKirill A. Shutemov 			result = SCAN_SCAN_ABORT;
1300b46e756fSKirill A. Shutemov 			goto out_unmap;
1301b46e756fSKirill A. Shutemov 		}
1302b46e756fSKirill A. Shutemov 		khugepaged_node_load[node]++;
1303b46e756fSKirill A. Shutemov 		if (!PageLRU(page)) {
1304b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LRU;
1305b46e756fSKirill A. Shutemov 			goto out_unmap;
1306b46e756fSKirill A. Shutemov 		}
1307b46e756fSKirill A. Shutemov 		if (PageLocked(page)) {
1308b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_LOCK;
1309b46e756fSKirill A. Shutemov 			goto out_unmap;
1310b46e756fSKirill A. Shutemov 		}
1311b46e756fSKirill A. Shutemov 		if (!PageAnon(page)) {
1312b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_ANON;
1313b46e756fSKirill A. Shutemov 			goto out_unmap;
1314b46e756fSKirill A. Shutemov 		}
1315b46e756fSKirill A. Shutemov 
1316b46e756fSKirill A. Shutemov 		/*
13179445689fSKirill A. Shutemov 		 * Check if the page has any GUP (or other external) pins.
13189445689fSKirill A. Shutemov 		 *
13199445689fSKirill A. Shutemov 		 * Here the check is racy: it may see total_mapcount > refcount
13209445689fSKirill A. Shutemov 		 * in some cases.
13219445689fSKirill A. Shutemov 		 * For example, take one process with one forked child process.
13229445689fSKirill A. Shutemov 		 * The parent has its PMD split due to MADV_DONTNEED, then
13239445689fSKirill A. Shutemov 		 * the child is unmapping the whole PMD, but khugepaged
13249445689fSKirill A. Shutemov 		 * may be scanning the parent between the child clearing the
13259445689fSKirill A. Shutemov 		 * PageDoubleMap flag and decrementing the mapcount.  So
13269445689fSKirill A. Shutemov 		 * khugepaged may see total_mapcount > refcount.
13279445689fSKirill A. Shutemov 		 *
13289445689fSKirill A. Shutemov 		 * But such a case is ephemeral and we can always retry the
13299445689fSKirill A. Shutemov 		 * collapse later.  It may report a false positive if the page
13309445689fSKirill A. Shutemov 		 * has excessive GUP pins (i.e. 512).  Anyway, the same check
13319445689fSKirill A. Shutemov 		 * will be done again later, so the risk seems low.
1332b46e756fSKirill A. Shutemov 		 */
13339445689fSKirill A. Shutemov 		if (!is_refcount_suitable(page)) {
1334b46e756fSKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
1335b46e756fSKirill A. Shutemov 			goto out_unmap;
1336b46e756fSKirill A. Shutemov 		}
1337b46e756fSKirill A. Shutemov 		if (pte_young(pteval) ||
1338b46e756fSKirill A. Shutemov 		    page_is_young(page) || PageReferenced(page) ||
1339b46e756fSKirill A. Shutemov 		    mmu_notifier_test_young(vma->vm_mm, address))
13400db501f7SEbru Akagunduz 			referenced++;
1341b46e756fSKirill A. Shutemov 	}
1342ffe945e6SKirill A. Shutemov 	if (!writable) {
1343ffe945e6SKirill A. Shutemov 		result = SCAN_PAGE_RO;
1344ffe945e6SKirill A. Shutemov 	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1345ffe945e6SKirill A. Shutemov 		result = SCAN_LACK_REFERENCED_PAGE;
1346ffe945e6SKirill A. Shutemov 	} else {
1347b46e756fSKirill A. Shutemov 		result = SCAN_SUCCEED;
1348b46e756fSKirill A. Shutemov 		ret = 1;
1349b46e756fSKirill A. Shutemov 	}
1350b46e756fSKirill A. Shutemov out_unmap:
1351b46e756fSKirill A. Shutemov 	pte_unmap_unlock(pte, ptl);
1352b46e756fSKirill A. Shutemov 	if (ret) {
1353b46e756fSKirill A. Shutemov 		node = khugepaged_find_target_node();
1354c1e8d7c6SMichel Lespinasse 		/* collapse_huge_page will return with the mmap_lock released */
1355ffe945e6SKirill A. Shutemov 		collapse_huge_page(mm, address, hpage, node,
1356ffe945e6SKirill A. Shutemov 				referenced, unmapped);
1357b46e756fSKirill A. Shutemov 	}
1358b46e756fSKirill A. Shutemov out:
1359b46e756fSKirill A. Shutemov 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1360b46e756fSKirill A. Shutemov 				     none_or_zero, result, unmapped);
1361b46e756fSKirill A. Shutemov 	return ret;
1362b46e756fSKirill A. Shutemov }
1363b46e756fSKirill A. Shutemov 
1364b46e756fSKirill A. Shutemov static void collect_mm_slot(struct mm_slot *mm_slot)
1365b46e756fSKirill A. Shutemov {
1366b46e756fSKirill A. Shutemov 	struct mm_struct *mm = mm_slot->mm;
1367b46e756fSKirill A. Shutemov 
136835f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
1369b46e756fSKirill A. Shutemov 
1370b46e756fSKirill A. Shutemov 	if (khugepaged_test_exit(mm)) {
1371b46e756fSKirill A. Shutemov 		/* free mm_slot */
1372b46e756fSKirill A. Shutemov 		hash_del(&mm_slot->hash);
1373b46e756fSKirill A. Shutemov 		list_del(&mm_slot->mm_node);
1374b46e756fSKirill A. Shutemov 
1375b46e756fSKirill A. Shutemov 		/*
1376b46e756fSKirill A. Shutemov 		 * Not strictly needed because the mm exited already.
1377b46e756fSKirill A. Shutemov 		 *
1378b46e756fSKirill A. Shutemov 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1379b46e756fSKirill A. Shutemov 		 */
1380b46e756fSKirill A. Shutemov 
1381b46e756fSKirill A. Shutemov 		/* khugepaged_mm_lock actually not necessary for the below */
1382b46e756fSKirill A. Shutemov 		free_mm_slot(mm_slot);
1383b46e756fSKirill A. Shutemov 		mmdrop(mm);
1384b46e756fSKirill A. Shutemov 	}
1385b46e756fSKirill A. Shutemov }
1386b46e756fSKirill A. Shutemov 
1387396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_SHMEM
138827e1f827SSong Liu /*
138927e1f827SSong Liu  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
139027e1f827SSong Liu  * khugepaged should try to collapse the page table.
139127e1f827SSong Liu  */
139227e1f827SSong Liu static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
139327e1f827SSong Liu 					 unsigned long addr)
139427e1f827SSong Liu {
139527e1f827SSong Liu 	struct mm_slot *mm_slot;
139627e1f827SSong Liu 
139727e1f827SSong Liu 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
139827e1f827SSong Liu 
139927e1f827SSong Liu 	spin_lock(&khugepaged_mm_lock);
140027e1f827SSong Liu 	mm_slot = get_mm_slot(mm);
140127e1f827SSong Liu 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
140227e1f827SSong Liu 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
140327e1f827SSong Liu 	spin_unlock(&khugepaged_mm_lock);
140427e1f827SSong Liu 	return 0;
140527e1f827SSong Liu }
140627e1f827SSong Liu 
1407e59a47b8SPasha Tatashin static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1408e59a47b8SPasha Tatashin 				  unsigned long addr, pmd_t *pmdp)
1409e59a47b8SPasha Tatashin {
1410e59a47b8SPasha Tatashin 	spinlock_t *ptl;
1411e59a47b8SPasha Tatashin 	pmd_t pmd;
1412e59a47b8SPasha Tatashin 
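	/*
	 * Clear and flush the PMD under its lock, then drop the page table
	 * accounting and free the PTE page table it pointed to.  The caller
	 * must hold mmap_lock in write mode.
	 */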
141380110bbfSPasha Tatashin 	mmap_assert_write_locked(mm);
1414e59a47b8SPasha Tatashin 	ptl = pmd_lock(vma->vm_mm, pmdp);
1415e59a47b8SPasha Tatashin 	pmd = pmdp_collapse_flush(vma, addr, pmdp);
1416e59a47b8SPasha Tatashin 	spin_unlock(ptl);
1417e59a47b8SPasha Tatashin 	mm_dec_nr_ptes(mm);
141880110bbfSPasha Tatashin 	page_table_check_pte_clear_range(mm, addr, pmd);
1419e59a47b8SPasha Tatashin 	pte_free(mm, pmd_pgtable(pmd));
1420e59a47b8SPasha Tatashin }
1421e59a47b8SPasha Tatashin 
142227e1f827SSong Liu /**
1423336e6b53SAlex Shi  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1424336e6b53SAlex Shi  * address @addr.
1425336e6b53SAlex Shi  *
1426336e6b53SAlex Shi  * @mm: process address space where collapse happens
1427336e6b53SAlex Shi  * @addr: THP collapse address
142827e1f827SSong Liu  *
142927e1f827SSong Liu  * This function checks whether all the PTEs in the PMD are pointing to the
143027e1f827SSong Liu  * right THP. If so, retract the page table so the THP can refault in
143127e1f827SSong Liu  * as pmd-mapped.
143227e1f827SSong Liu  */
143327e1f827SSong Liu void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
143427e1f827SSong Liu {
143527e1f827SSong Liu 	unsigned long haddr = addr & HPAGE_PMD_MASK;
143627e1f827SSong Liu 	struct vm_area_struct *vma = find_vma(mm, haddr);
1437119a5fc1SHugh Dickins 	struct page *hpage;
143827e1f827SSong Liu 	pte_t *start_pte, *pte;
1439e59a47b8SPasha Tatashin 	pmd_t *pmd;
144027e1f827SSong Liu 	spinlock_t *ptl;
144127e1f827SSong Liu 	int count = 0;
144227e1f827SSong Liu 	int i;
144327e1f827SSong Liu 
144427e1f827SSong Liu 	if (!vma || !vma->vm_file ||
1445fef792a4SMiaohe Lin 	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
144627e1f827SSong Liu 		return;
144727e1f827SSong Liu 
144827e1f827SSong Liu 	/*
144927e1f827SSong Liu 	 * The vm_flags may not have VM_HUGEPAGE if the page was not
145027e1f827SSong Liu 	 * collapsed by this mm. But we can still collapse if the page is
145127e1f827SSong Liu 	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
145227e1f827SSong Liu 	 * will not fail the vma for missing VM_HUGEPAGE.
145327e1f827SSong Liu 	 */
145427e1f827SSong Liu 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
145527e1f827SSong Liu 		return;
145627e1f827SSong Liu 
1457deb4c93aSPeter Xu 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1458deb4c93aSPeter Xu 	if (userfaultfd_wp(vma))
1459deb4c93aSPeter Xu 		return;
1460deb4c93aSPeter Xu 
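	/*
	 * Find and lock the THP that backs this file offset; holding its page
	 * lock keeps it stable while the PTEs below are checked against it.
	 */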
1461119a5fc1SHugh Dickins 	hpage = find_lock_page(vma->vm_file->f_mapping,
1462119a5fc1SHugh Dickins 			       linear_page_index(vma, haddr));
1463119a5fc1SHugh Dickins 	if (!hpage)
1464119a5fc1SHugh Dickins 		return;
1465119a5fc1SHugh Dickins 
1466119a5fc1SHugh Dickins 	if (!PageHead(hpage))
1467119a5fc1SHugh Dickins 		goto drop_hpage;
1468119a5fc1SHugh Dickins 
146927e1f827SSong Liu 	pmd = mm_find_pmd(mm, haddr);
147027e1f827SSong Liu 	if (!pmd)
1471119a5fc1SHugh Dickins 		goto drop_hpage;
147227e1f827SSong Liu 
147327e1f827SSong Liu 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
147427e1f827SSong Liu 
147527e1f827SSong Liu 	/* step 1: check all mapped PTEs are to the right huge page */
147627e1f827SSong Liu 	for (i = 0, addr = haddr, pte = start_pte;
147727e1f827SSong Liu 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
147827e1f827SSong Liu 		struct page *page;
147927e1f827SSong Liu 
148027e1f827SSong Liu 		/* empty pte, skip */
148127e1f827SSong Liu 		if (pte_none(*pte))
148227e1f827SSong Liu 			continue;
148327e1f827SSong Liu 
148427e1f827SSong Liu 		/* page swapped out, abort */
148527e1f827SSong Liu 		if (!pte_present(*pte))
148627e1f827SSong Liu 			goto abort;
148727e1f827SSong Liu 
148827e1f827SSong Liu 		page = vm_normal_page(vma, addr, *pte);
148927e1f827SSong Liu 
149027e1f827SSong Liu 		/*
1491119a5fc1SHugh Dickins 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1492119a5fc1SHugh Dickins 		 * page table, but the new page will not be a subpage of hpage.
149327e1f827SSong Liu 		 */
1494119a5fc1SHugh Dickins 		if (hpage + i != page)
149527e1f827SSong Liu 			goto abort;
149627e1f827SSong Liu 		count++;
149727e1f827SSong Liu 	}
149827e1f827SSong Liu 
149927e1f827SSong Liu 	/* step 2: adjust rmap */
150027e1f827SSong Liu 	for (i = 0, addr = haddr, pte = start_pte;
150127e1f827SSong Liu 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
150227e1f827SSong Liu 		struct page *page;
150327e1f827SSong Liu 
150427e1f827SSong Liu 		if (pte_none(*pte))
150527e1f827SSong Liu 			continue;
150627e1f827SSong Liu 		page = vm_normal_page(vma, addr, *pte);
1507cea86fe2SHugh Dickins 		page_remove_rmap(page, vma, false);
150827e1f827SSong Liu 	}
150927e1f827SSong Liu 
151027e1f827SSong Liu 	pte_unmap_unlock(start_pte, ptl);
151127e1f827SSong Liu 
151227e1f827SSong Liu 	/* step 3: set proper refcount and mm_counters. */
1513119a5fc1SHugh Dickins 	if (count) {
151427e1f827SSong Liu 		page_ref_sub(hpage, count);
151527e1f827SSong Liu 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
151627e1f827SSong Liu 	}
151727e1f827SSong Liu 
151827e1f827SSong Liu 	/* step 4: collapse pmd */
1519e59a47b8SPasha Tatashin 	collapse_and_free_pmd(mm, vma, haddr, pmd);
1520119a5fc1SHugh Dickins drop_hpage:
1521119a5fc1SHugh Dickins 	unlock_page(hpage);
1522119a5fc1SHugh Dickins 	put_page(hpage);
152327e1f827SSong Liu 	return;
152427e1f827SSong Liu 
152527e1f827SSong Liu abort:
152627e1f827SSong Liu 	pte_unmap_unlock(start_pte, ptl);
1527119a5fc1SHugh Dickins 	goto drop_hpage;
152827e1f827SSong Liu }
152927e1f827SSong Liu 
15300edf61e5SMiaohe Lin static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
153127e1f827SSong Liu {
153227e1f827SSong Liu 	struct mm_struct *mm = mm_slot->mm;
153327e1f827SSong Liu 	int i;
153427e1f827SSong Liu 
153527e1f827SSong Liu 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
15360edf61e5SMiaohe Lin 		return;
153727e1f827SSong Liu 
1538d8ed45c5SMichel Lespinasse 	if (!mmap_write_trylock(mm))
15390edf61e5SMiaohe Lin 		return;
154027e1f827SSong Liu 
154127e1f827SSong Liu 	if (unlikely(khugepaged_test_exit(mm)))
154227e1f827SSong Liu 		goto out;
154327e1f827SSong Liu 
154427e1f827SSong Liu 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
154527e1f827SSong Liu 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
154627e1f827SSong Liu 
154727e1f827SSong Liu out:
154827e1f827SSong Liu 	mm_slot->nr_pte_mapped_thp = 0;
1549d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
155027e1f827SSong Liu }
155127e1f827SSong Liu 
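/*
 * retract_page_tables - walk every VMA of @mapping that maps @pgoff and try
 * to withdraw the now pte-mapped page table so the file THP can be refaulted
 * as a huge PMD.  VMAs whose mmap_lock cannot be trylocked are recorded via
 * khugepaged_add_pte_mapped_thp() so khugepaged can retry them later.
 */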
1552f3f0e1d2SKirill A. Shutemov static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1553f3f0e1d2SKirill A. Shutemov {
1554f3f0e1d2SKirill A. Shutemov 	struct vm_area_struct *vma;
155518e77600SHugh Dickins 	struct mm_struct *mm;
1556f3f0e1d2SKirill A. Shutemov 	unsigned long addr;
1557e59a47b8SPasha Tatashin 	pmd_t *pmd;
1558f3f0e1d2SKirill A. Shutemov 
1559f3f0e1d2SKirill A. Shutemov 	i_mmap_lock_write(mapping);
1560f3f0e1d2SKirill A. Shutemov 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
156127e1f827SSong Liu 		/*
156227e1f827SSong Liu 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
156327e1f827SSong Liu 		 * got written to. These VMAs are likely not worth the cost of
15643e4e28c5SMichel Lespinasse 		 * mmap_write_lock(mm) as the PMD-mapping is likely to be split
156527e1f827SSong Liu 		 * later.
156627e1f827SSong Liu 		 *
156727e1f827SSong Liu 		 * Note that the vma->anon_vma check is racy: it can be set up
1568c1e8d7c6SMichel Lespinasse 		 * after the check but before the fault path takes mmap_lock.
156927e1f827SSong Liu 		 * But the page lock prevents establishing any new ptes of the
157027e1f827SSong Liu 		 * page, so we are safe.
157127e1f827SSong Liu 		 *
157227e1f827SSong Liu 		 * An alternative would be to drop the check, but to check that
157327e1f827SSong Liu 		 * the page table is clear before calling pmdp_collapse_flush()
157427e1f827SSong Liu 		 * under ptl. That has a higher chance of recovering a THP for
157527e1f827SSong Liu 		 * the VMA, but also a higher cost.
157627e1f827SSong Liu 		 */
1577f3f0e1d2SKirill A. Shutemov 		if (vma->anon_vma)
1578f3f0e1d2SKirill A. Shutemov 			continue;
1579f3f0e1d2SKirill A. Shutemov 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1580f3f0e1d2SKirill A. Shutemov 		if (addr & ~HPAGE_PMD_MASK)
1581f3f0e1d2SKirill A. Shutemov 			continue;
1582f3f0e1d2SKirill A. Shutemov 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1583f3f0e1d2SKirill A. Shutemov 			continue;
158418e77600SHugh Dickins 		mm = vma->vm_mm;
158518e77600SHugh Dickins 		pmd = mm_find_pmd(mm, addr);
1586f3f0e1d2SKirill A. Shutemov 		if (!pmd)
1587f3f0e1d2SKirill A. Shutemov 			continue;
1588f3f0e1d2SKirill A. Shutemov 		/*
1589c1e8d7c6SMichel Lespinasse 		 * We need exclusive mmap_lock to retract page table.
159027e1f827SSong Liu 		 *
159127e1f827SSong Liu 		 * We use trylock due to lock inversion: we need to acquire
1592c1e8d7c6SMichel Lespinasse 		 * mmap_lock while holding page lock. Fault path does it in
159327e1f827SSong Liu 		 * reverse order. Trylock is a way to avoid deadlock.
1594f3f0e1d2SKirill A. Shutemov 		 */
159518e77600SHugh Dickins 		if (mmap_write_trylock(mm)) {
1596deb4c93aSPeter Xu 			/*
1597deb4c93aSPeter Xu 			 * When a vma is registered with uffd-wp, we can't
1598deb4c93aSPeter Xu 			 * recycle the pmd pgtable because there can be pte
1599deb4c93aSPeter Xu 			 * markers installed.  Only skip this vma, so the rest of
1600deb4c93aSPeter Xu 			 * the mms/vmas can still have the same file mapped hugely;
1601deb4c93aSPeter Xu 			 * however, it will always be mapped with small pages for
1602deb4c93aSPeter Xu 			 * uffd-wp registered ranges.
1603deb4c93aSPeter Xu 			 */
1604deb4c93aSPeter Xu 			if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
1605e59a47b8SPasha Tatashin 				collapse_and_free_pmd(mm, vma, addr, pmd);
160618e77600SHugh Dickins 			mmap_write_unlock(mm);
160727e1f827SSong Liu 		} else {
160827e1f827SSong Liu 			/* Try again later */
160918e77600SHugh Dickins 			khugepaged_add_pte_mapped_thp(mm, addr);
1610f3f0e1d2SKirill A. Shutemov 		}
1611f3f0e1d2SKirill A. Shutemov 	}
1612f3f0e1d2SKirill A. Shutemov 	i_mmap_unlock_write(mapping);
1613f3f0e1d2SKirill A. Shutemov }
1614f3f0e1d2SKirill A. Shutemov 
1615f3f0e1d2SKirill A. Shutemov /**
161699cb0dbdSSong Liu  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1617f3f0e1d2SKirill A. Shutemov  *
1618336e6b53SAlex Shi  * @mm: process address space where collapse happens
1619336e6b53SAlex Shi  * @file: file that the collapse operates on
1620336e6b53SAlex Shi  * @start: collapse start index in the page cache
1621336e6b53SAlex Shi  * @hpage: newly allocated huge page for collapse
1622336e6b53SAlex Shi  * @node: appointed node the new huge page is allocated from
1623336e6b53SAlex Shi  *
1624f3f0e1d2SKirill A. Shutemov  * Basic scheme is simple, details are more complex:
162587c460a0SHugh Dickins  *  - allocate and lock a new huge page;
162677da9389SMatthew Wilcox  *  - scan page cache replacing old pages with the new one
162799cb0dbdSSong Liu  *    + swap/gup in pages if necessary;
1628f3f0e1d2SKirill A. Shutemov  *    + fill in gaps;
162977da9389SMatthew Wilcox  *    + keep old pages around in case rollback is required;
163077da9389SMatthew Wilcox  *  - if replacing succeeds:
1631f3f0e1d2SKirill A. Shutemov  *    + copy data over;
1632f3f0e1d2SKirill A. Shutemov  *    + free old pages;
163387c460a0SHugh Dickins  *    + unlock huge page;
1634f3f0e1d2SKirill A. Shutemov  *  - if replacing failed:
1635f3f0e1d2SKirill A. Shutemov  *    + put all pages back and unfreeze them;
163677da9389SMatthew Wilcox  *    + restore gaps in the page cache;
163787c460a0SHugh Dickins  *    + unlock and free huge page;
1638f3f0e1d2SKirill A. Shutemov  */
1639579c571eSSong Liu static void collapse_file(struct mm_struct *mm,
1640579c571eSSong Liu 		struct file *file, pgoff_t start,
1641f3f0e1d2SKirill A. Shutemov 		struct page **hpage, int node)
1642f3f0e1d2SKirill A. Shutemov {
1643579c571eSSong Liu 	struct address_space *mapping = file->f_mapping;
1644f3f0e1d2SKirill A. Shutemov 	gfp_t gfp;
164577da9389SMatthew Wilcox 	struct page *new_page;
1646f3f0e1d2SKirill A. Shutemov 	pgoff_t index, end = start + HPAGE_PMD_NR;
1647f3f0e1d2SKirill A. Shutemov 	LIST_HEAD(pagelist);
164877da9389SMatthew Wilcox 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1649f3f0e1d2SKirill A. Shutemov 	int nr_none = 0, result = SCAN_SUCCEED;
165099cb0dbdSSong Liu 	bool is_shmem = shmem_file(file);
1651bf9eceadSMuchun Song 	int nr;
1652f3f0e1d2SKirill A. Shutemov 
165399cb0dbdSSong Liu 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1654f3f0e1d2SKirill A. Shutemov 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1655f3f0e1d2SKirill A. Shutemov 
1656f3f0e1d2SKirill A. Shutemov 	/* Only allocate from the target node */
165741b6167eSMichal Hocko 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1658f3f0e1d2SKirill A. Shutemov 
1659f3f0e1d2SKirill A. Shutemov 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1660f3f0e1d2SKirill A. Shutemov 	if (!new_page) {
1661f3f0e1d2SKirill A. Shutemov 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1662f3f0e1d2SKirill A. Shutemov 		goto out;
1663f3f0e1d2SKirill A. Shutemov 	}
1664f3f0e1d2SKirill A. Shutemov 
16658f425e4eSMatthew Wilcox (Oracle) 	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
1666f3f0e1d2SKirill A. Shutemov 		result = SCAN_CGROUP_CHARGE_FAIL;
1667f3f0e1d2SKirill A. Shutemov 		goto out;
1668f3f0e1d2SKirill A. Shutemov 	}
16699d82c694SJohannes Weiner 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1670f3f0e1d2SKirill A. Shutemov 
16716b24ca4aSMatthew Wilcox (Oracle) 	/*
16726b24ca4aSMatthew Wilcox (Oracle) 	 * Ensure we have slots for all the pages in the range.  This is
16736b24ca4aSMatthew Wilcox (Oracle) 	 * almost certainly a no-op because most of the pages must be present
16746b24ca4aSMatthew Wilcox (Oracle) 	 */
167595feeabbSHugh Dickins 	do {
167695feeabbSHugh Dickins 		xas_lock_irq(&xas);
167795feeabbSHugh Dickins 		xas_create_range(&xas);
167895feeabbSHugh Dickins 		if (!xas_error(&xas))
167995feeabbSHugh Dickins 			break;
168095feeabbSHugh Dickins 		xas_unlock_irq(&xas);
168195feeabbSHugh Dickins 		if (!xas_nomem(&xas, GFP_KERNEL)) {
168295feeabbSHugh Dickins 			result = SCAN_FAIL;
168395feeabbSHugh Dickins 			goto out;
168495feeabbSHugh Dickins 		}
168595feeabbSHugh Dickins 	} while (1);
168695feeabbSHugh Dickins 
1687042a3082SHugh Dickins 	__SetPageLocked(new_page);
168899cb0dbdSSong Liu 	if (is_shmem)
1689042a3082SHugh Dickins 		__SetPageSwapBacked(new_page);
1690f3f0e1d2SKirill A. Shutemov 	new_page->index = start;
1691f3f0e1d2SKirill A. Shutemov 	new_page->mapping = mapping;
1692f3f0e1d2SKirill A. Shutemov 
1693f3f0e1d2SKirill A. Shutemov 	/*
169487c460a0SHugh Dickins 	 * At this point the new_page is locked and not up-to-date.
169587c460a0SHugh Dickins 	 * It's safe to insert it into the page cache, because nobody would
169687c460a0SHugh Dickins 	 * be able to map it or use it in another way until we unlock it.
1697f3f0e1d2SKirill A. Shutemov 	 */
1698f3f0e1d2SKirill A. Shutemov 
169977da9389SMatthew Wilcox 	xas_set(&xas, start);
170077da9389SMatthew Wilcox 	for (index = start; index < end; index++) {
170177da9389SMatthew Wilcox 		struct page *page = xas_next(&xas);
170277da9389SMatthew Wilcox 
170377da9389SMatthew Wilcox 		VM_BUG_ON(index != xas.xa_index);
170499cb0dbdSSong Liu 		if (is_shmem) {
170577da9389SMatthew Wilcox 			if (!page) {
1706701270faSHugh Dickins 				/*
170799cb0dbdSSong Liu 				 * Stop if extent has been truncated or
170899cb0dbdSSong Liu 				 * hole-punched, and is now completely
170999cb0dbdSSong Liu 				 * empty.
1710701270faSHugh Dickins 				 */
1711701270faSHugh Dickins 				if (index == start) {
1712701270faSHugh Dickins 					if (!xas_next_entry(&xas, end - 1)) {
1713701270faSHugh Dickins 						result = SCAN_TRUNCATED;
1714042a3082SHugh Dickins 						goto xa_locked;
1715701270faSHugh Dickins 					}
1716701270faSHugh Dickins 					xas_set(&xas, index);
1717701270faSHugh Dickins 				}
171877da9389SMatthew Wilcox 				if (!shmem_charge(mapping->host, 1)) {
1719f3f0e1d2SKirill A. Shutemov 					result = SCAN_FAIL;
1720042a3082SHugh Dickins 					goto xa_locked;
1721f3f0e1d2SKirill A. Shutemov 				}
17224101196bSMatthew Wilcox (Oracle) 				xas_store(&xas, new_page);
172377da9389SMatthew Wilcox 				nr_none++;
172477da9389SMatthew Wilcox 				continue;
1725f3f0e1d2SKirill A. Shutemov 			}
1726f3f0e1d2SKirill A. Shutemov 
17273159f943SMatthew Wilcox 			if (xa_is_value(page) || !PageUptodate(page)) {
172877da9389SMatthew Wilcox 				xas_unlock_irq(&xas);
1729f3f0e1d2SKirill A. Shutemov 				/* swap in or instantiate fallocated page */
1730f3f0e1d2SKirill A. Shutemov 				if (shmem_getpage(mapping->host, index, &page,
1731acdd9f8eSHugh Dickins 						  SGP_NOALLOC)) {
1732f3f0e1d2SKirill A. Shutemov 					result = SCAN_FAIL;
173377da9389SMatthew Wilcox 					goto xa_unlocked;
1734f3f0e1d2SKirill A. Shutemov 				}
1735f3f0e1d2SKirill A. Shutemov 			} else if (trylock_page(page)) {
1736f3f0e1d2SKirill A. Shutemov 				get_page(page);
1737042a3082SHugh Dickins 				xas_unlock_irq(&xas);
1738f3f0e1d2SKirill A. Shutemov 			} else {
1739f3f0e1d2SKirill A. Shutemov 				result = SCAN_PAGE_LOCK;
1740042a3082SHugh Dickins 				goto xa_locked;
1741f3f0e1d2SKirill A. Shutemov 			}
174299cb0dbdSSong Liu 		} else {	/* !is_shmem */
174399cb0dbdSSong Liu 			if (!page || xa_is_value(page)) {
174499cb0dbdSSong Liu 				xas_unlock_irq(&xas);
174599cb0dbdSSong Liu 				page_cache_sync_readahead(mapping, &file->f_ra,
174699cb0dbdSSong Liu 							  file, index,
1747e5a59d30SDavid Howells 							  end - index);
174899cb0dbdSSong Liu 				/* drain pagevecs to help isolate_lru_page() */
174999cb0dbdSSong Liu 				lru_add_drain();
175099cb0dbdSSong Liu 				page = find_lock_page(mapping, index);
175199cb0dbdSSong Liu 				if (unlikely(page == NULL)) {
175299cb0dbdSSong Liu 					result = SCAN_FAIL;
175399cb0dbdSSong Liu 					goto xa_unlocked;
175499cb0dbdSSong Liu 				}
175575f36069SSong Liu 			} else if (PageDirty(page)) {
175675f36069SSong Liu 				/*
175775f36069SSong Liu 				 * khugepaged only works on read-only fds,
175875f36069SSong Liu 				 * so this page is dirty because it hasn't
175975f36069SSong Liu 				 * been flushed since the first write. There
176075f36069SSong Liu 				 * won't be new dirty pages.
176175f36069SSong Liu 				 *
176275f36069SSong Liu 				 * Trigger async flush here and hope the
176375f36069SSong Liu 				 * writeback is done when khugepaged
176475f36069SSong Liu 				 * revisits this page.
176575f36069SSong Liu 				 *
176675f36069SSong Liu 				 * This is a one-off situation. We are not
176775f36069SSong Liu 				 * forcing writeback in loop.
176875f36069SSong Liu 				 */
176975f36069SSong Liu 				xas_unlock_irq(&xas);
177075f36069SSong Liu 				filemap_flush(mapping);
177175f36069SSong Liu 				result = SCAN_FAIL;
177275f36069SSong Liu 				goto xa_unlocked;
177374c42e1bSRongwei Wang 			} else if (PageWriteback(page)) {
177474c42e1bSRongwei Wang 				xas_unlock_irq(&xas);
177574c42e1bSRongwei Wang 				result = SCAN_FAIL;
177674c42e1bSRongwei Wang 				goto xa_unlocked;
177799cb0dbdSSong Liu 			} else if (trylock_page(page)) {
177899cb0dbdSSong Liu 				get_page(page);
177999cb0dbdSSong Liu 				xas_unlock_irq(&xas);
178099cb0dbdSSong Liu 			} else {
178199cb0dbdSSong Liu 				result = SCAN_PAGE_LOCK;
178299cb0dbdSSong Liu 				goto xa_locked;
178399cb0dbdSSong Liu 			}
178499cb0dbdSSong Liu 		}
1785f3f0e1d2SKirill A. Shutemov 
1786f3f0e1d2SKirill A. Shutemov 		/*
1787b93b0163SMatthew Wilcox 		 * The page must be locked, so we can drop the i_pages lock
1788f3f0e1d2SKirill A. Shutemov 		 * without racing with truncate.
1789f3f0e1d2SKirill A. Shutemov 		 */
1790f3f0e1d2SKirill A. Shutemov 		VM_BUG_ON_PAGE(!PageLocked(page), page);
17914655e5e5SSong Liu 
17924655e5e5SSong Liu 		/* make sure the page is up to date */
17934655e5e5SSong Liu 		if (unlikely(!PageUptodate(page))) {
17944655e5e5SSong Liu 			result = SCAN_FAIL;
17954655e5e5SSong Liu 			goto out_unlock;
17964655e5e5SSong Liu 		}
179706a5e126SHugh Dickins 
179806a5e126SHugh Dickins 		/*
179906a5e126SHugh Dickins 		 * If file was truncated then extended, or hole-punched, before
180006a5e126SHugh Dickins 		 * we locked the first page, then a THP might be there already.
180106a5e126SHugh Dickins 		 */
180206a5e126SHugh Dickins 		if (PageTransCompound(page)) {
180306a5e126SHugh Dickins 			result = SCAN_PAGE_COMPOUND;
180406a5e126SHugh Dickins 			goto out_unlock;
180506a5e126SHugh Dickins 		}
1806f3f0e1d2SKirill A. Shutemov 
1807f3f0e1d2SKirill A. Shutemov 		if (page_mapping(page) != mapping) {
1808f3f0e1d2SKirill A. Shutemov 			result = SCAN_TRUNCATED;
1809f3f0e1d2SKirill A. Shutemov 			goto out_unlock;
1810f3f0e1d2SKirill A. Shutemov 		}
1811f3f0e1d2SKirill A. Shutemov 
181274c42e1bSRongwei Wang 		if (!is_shmem && (PageDirty(page) ||
181374c42e1bSRongwei Wang 				  PageWriteback(page))) {
18144655e5e5SSong Liu 			/*
18154655e5e5SSong Liu 			 * khugepaged only works on read-only fds, so this
18164655e5e5SSong Liu 			 * page is dirty because it hasn't been flushed
18174655e5e5SSong Liu 			 * since the first write.
18184655e5e5SSong Liu 			 */
18194655e5e5SSong Liu 			result = SCAN_FAIL;
18204655e5e5SSong Liu 			goto out_unlock;
18214655e5e5SSong Liu 		}
18224655e5e5SSong Liu 
1823f3f0e1d2SKirill A. Shutemov 		if (isolate_lru_page(page)) {
1824f3f0e1d2SKirill A. Shutemov 			result = SCAN_DEL_PAGE_LRU;
1825042a3082SHugh Dickins 			goto out_unlock;
1826f3f0e1d2SKirill A. Shutemov 		}
1827f3f0e1d2SKirill A. Shutemov 
182899cb0dbdSSong Liu 		if (page_has_private(page) &&
182999cb0dbdSSong Liu 		    !try_to_release_page(page, GFP_KERNEL)) {
183099cb0dbdSSong Liu 			result = SCAN_PAGE_HAS_PRIVATE;
18312f33a706SHugh Dickins 			putback_lru_page(page);
183299cb0dbdSSong Liu 			goto out_unlock;
183399cb0dbdSSong Liu 		}
183499cb0dbdSSong Liu 
1835f3f0e1d2SKirill A. Shutemov 		if (page_mapped(page))
1836869f7ee6SMatthew Wilcox (Oracle) 			try_to_unmap(page_folio(page),
1837869f7ee6SMatthew Wilcox (Oracle) 					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1838f3f0e1d2SKirill A. Shutemov 
183977da9389SMatthew Wilcox 		xas_lock_irq(&xas);
184077da9389SMatthew Wilcox 		xas_set(&xas, index);
1841f3f0e1d2SKirill A. Shutemov 
184277da9389SMatthew Wilcox 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1843f3f0e1d2SKirill A. Shutemov 
1844f3f0e1d2SKirill A. Shutemov 		/*
1845f3f0e1d2SKirill A. Shutemov 		 * The page is expected to have page_count() == 3:
1846f3f0e1d2SKirill A. Shutemov 		 *  - we hold a pin on it;
184777da9389SMatthew Wilcox 		 *  - one reference from page cache;
1848f3f0e1d2SKirill A. Shutemov 		 *  - one from isolate_lru_page;
1849f3f0e1d2SKirill A. Shutemov 		 */
1850f3f0e1d2SKirill A. Shutemov 		if (!page_ref_freeze(page, 3)) {
1851f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
1852042a3082SHugh Dickins 			xas_unlock_irq(&xas);
1853042a3082SHugh Dickins 			putback_lru_page(page);
1854042a3082SHugh Dickins 			goto out_unlock;
1855f3f0e1d2SKirill A. Shutemov 		}
1856f3f0e1d2SKirill A. Shutemov 
1857f3f0e1d2SKirill A. Shutemov 		/*
1858f3f0e1d2SKirill A. Shutemov 		 * Add the page to the list to be able to undo the collapse if
1859f3f0e1d2SKirill A. Shutemov 		 * something goes wrong.
1860f3f0e1d2SKirill A. Shutemov 		 */
1861f3f0e1d2SKirill A. Shutemov 		list_add_tail(&page->lru, &pagelist);
1862f3f0e1d2SKirill A. Shutemov 
1863f3f0e1d2SKirill A. Shutemov 		/* Finally, replace with the new page. */
18644101196bSMatthew Wilcox (Oracle) 		xas_store(&xas, new_page);
1865f3f0e1d2SKirill A. Shutemov 		continue;
1866f3f0e1d2SKirill A. Shutemov out_unlock:
1867f3f0e1d2SKirill A. Shutemov 		unlock_page(page);
1868f3f0e1d2SKirill A. Shutemov 		put_page(page);
1869042a3082SHugh Dickins 		goto xa_unlocked;
1870f3f0e1d2SKirill A. Shutemov 	}
1871bf9eceadSMuchun Song 	nr = thp_nr_pages(new_page);
1872f3f0e1d2SKirill A. Shutemov 
187399cb0dbdSSong Liu 	if (is_shmem)
187457b2847dSMuchun Song 		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
187509d91cdaSSong Liu 	else {
1876bf9eceadSMuchun Song 		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
187709d91cdaSSong Liu 		filemap_nr_thps_inc(mapping);
1878eb6ecbedSCollin Fijalkovich 		/*
1879eb6ecbedSCollin Fijalkovich 		 * Paired with smp_mb() in do_dentry_open() to ensure
1880eb6ecbedSCollin Fijalkovich 		 * i_writecount is up to date and the update to nr_thps is
1881eb6ecbedSCollin Fijalkovich 		 * visible. Ensures the page cache will be truncated if the
1882eb6ecbedSCollin Fijalkovich 		 * file is opened writable.
1883eb6ecbedSCollin Fijalkovich 		 */
1884eb6ecbedSCollin Fijalkovich 		smp_mb();
1885eb6ecbedSCollin Fijalkovich 		if (inode_is_open_for_write(mapping->host)) {
1886eb6ecbedSCollin Fijalkovich 			result = SCAN_FAIL;
1887eb6ecbedSCollin Fijalkovich 			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
1888eb6ecbedSCollin Fijalkovich 			filemap_nr_thps_dec(mapping);
1889eb6ecbedSCollin Fijalkovich 			goto xa_locked;
1890eb6ecbedSCollin Fijalkovich 		}
189109d91cdaSSong Liu 	}
189299cb0dbdSSong Liu 
1893042a3082SHugh Dickins 	if (nr_none) {
18949d82c694SJohannes Weiner 		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
189599cb0dbdSSong Liu 		if (is_shmem)
18969d82c694SJohannes Weiner 			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1897042a3082SHugh Dickins 	}
1898042a3082SHugh Dickins 
18996b24ca4aSMatthew Wilcox (Oracle) 	/* Join all the small entries into a single multi-index entry */
19006b24ca4aSMatthew Wilcox (Oracle) 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
19016b24ca4aSMatthew Wilcox (Oracle) 	xas_store(&xas, new_page);
1902042a3082SHugh Dickins xa_locked:
1903042a3082SHugh Dickins 	xas_unlock_irq(&xas);
190477da9389SMatthew Wilcox xa_unlocked:
1905042a3082SHugh Dickins 
19066d9df8a5SHugh Dickins 	/*
19076d9df8a5SHugh Dickins 	 * If collapse is successful, flush must be done now before copying.
19086d9df8a5SHugh Dickins 	 * If collapse is unsuccessful, does flush actually need to be done?
19096d9df8a5SHugh Dickins 	 * Do it anyway, to clear the state.
19106d9df8a5SHugh Dickins 	 */
19116d9df8a5SHugh Dickins 	try_to_unmap_flush();
19126d9df8a5SHugh Dickins 
1913f3f0e1d2SKirill A. Shutemov 	if (result == SCAN_SUCCEED) {
191477da9389SMatthew Wilcox 		struct page *page, *tmp;
1915f3f0e1d2SKirill A. Shutemov 
1916f3f0e1d2SKirill A. Shutemov 		/*
191777da9389SMatthew Wilcox 		 * Replacing the old pages with the new one has succeeded, now we
191877da9389SMatthew Wilcox 		 * need to copy the content and free the old pages.
1919f3f0e1d2SKirill A. Shutemov 		 */
19202af8ff29SHugh Dickins 		index = start;
1921f3f0e1d2SKirill A. Shutemov 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
19222af8ff29SHugh Dickins 			while (index < page->index) {
19232af8ff29SHugh Dickins 				clear_highpage(new_page + (index % HPAGE_PMD_NR));
19242af8ff29SHugh Dickins 				index++;
19252af8ff29SHugh Dickins 			}
1926f3f0e1d2SKirill A. Shutemov 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1927f3f0e1d2SKirill A. Shutemov 					page);
1928f3f0e1d2SKirill A. Shutemov 			list_del(&page->lru);
1929f3f0e1d2SKirill A. Shutemov 			page->mapping = NULL;
1930042a3082SHugh Dickins 			page_ref_unfreeze(page, 1);
1931f3f0e1d2SKirill A. Shutemov 			ClearPageActive(page);
1932f3f0e1d2SKirill A. Shutemov 			ClearPageUnevictable(page);
1933042a3082SHugh Dickins 			unlock_page(page);
1934f3f0e1d2SKirill A. Shutemov 			put_page(page);
19352af8ff29SHugh Dickins 			index++;
19362af8ff29SHugh Dickins 		}
19372af8ff29SHugh Dickins 		while (index < end) {
19382af8ff29SHugh Dickins 			clear_highpage(new_page + (index % HPAGE_PMD_NR));
19392af8ff29SHugh Dickins 			index++;
1940f3f0e1d2SKirill A. Shutemov 		}
1941f3f0e1d2SKirill A. Shutemov 
1942f3f0e1d2SKirill A. Shutemov 		SetPageUptodate(new_page);
194387c460a0SHugh Dickins 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
19446058eaecSJohannes Weiner 		if (is_shmem)
194599cb0dbdSSong Liu 			set_page_dirty(new_page);
19466058eaecSJohannes Weiner 		lru_cache_add(new_page);
1947f3f0e1d2SKirill A. Shutemov 
1948042a3082SHugh Dickins 		/*
1949042a3082SHugh Dickins 		 * Remove pte page tables, so we can re-fault the page as huge.
1950042a3082SHugh Dickins 		 */
1951042a3082SHugh Dickins 		retract_page_tables(mapping, start);
1952f3f0e1d2SKirill A. Shutemov 		*hpage = NULL;
195387aa7529SYang Shi 
195487aa7529SYang Shi 		khugepaged_pages_collapsed++;
1955f3f0e1d2SKirill A. Shutemov 	} else {
195677da9389SMatthew Wilcox 		struct page *page;
1957aaa52e34SHugh Dickins 
195877da9389SMatthew Wilcox 		/* Something went wrong: roll back page cache changes */
195977da9389SMatthew Wilcox 		xas_lock_irq(&xas);
1960aaa52e34SHugh Dickins 		mapping->nrpages -= nr_none;
196199cb0dbdSSong Liu 
196299cb0dbdSSong Liu 		if (is_shmem)
1963aaa52e34SHugh Dickins 			shmem_uncharge(mapping->host, nr_none);
1964aaa52e34SHugh Dickins 
196577da9389SMatthew Wilcox 		xas_set(&xas, start);
196677da9389SMatthew Wilcox 		xas_for_each(&xas, page, end - 1) {
1967f3f0e1d2SKirill A. Shutemov 			page = list_first_entry_or_null(&pagelist,
1968f3f0e1d2SKirill A. Shutemov 					struct page, lru);
196977da9389SMatthew Wilcox 			if (!page || xas.xa_index < page->index) {
1970f3f0e1d2SKirill A. Shutemov 				if (!nr_none)
1971f3f0e1d2SKirill A. Shutemov 					break;
1972f3f0e1d2SKirill A. Shutemov 				nr_none--;
197359749e6cSJohannes Weiner 				/* Put holes back where they were */
197477da9389SMatthew Wilcox 				xas_store(&xas, NULL);
1975f3f0e1d2SKirill A. Shutemov 				continue;
1976f3f0e1d2SKirill A. Shutemov 			}
1977f3f0e1d2SKirill A. Shutemov 
197877da9389SMatthew Wilcox 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1979f3f0e1d2SKirill A. Shutemov 
1980f3f0e1d2SKirill A. Shutemov 			/* Unfreeze the page. */
1981f3f0e1d2SKirill A. Shutemov 			list_del(&page->lru);
1982f3f0e1d2SKirill A. Shutemov 			page_ref_unfreeze(page, 2);
198377da9389SMatthew Wilcox 			xas_store(&xas, page);
198477da9389SMatthew Wilcox 			xas_pause(&xas);
198577da9389SMatthew Wilcox 			xas_unlock_irq(&xas);
1986f3f0e1d2SKirill A. Shutemov 			unlock_page(page);
1987042a3082SHugh Dickins 			putback_lru_page(page);
198877da9389SMatthew Wilcox 			xas_lock_irq(&xas);
1989f3f0e1d2SKirill A. Shutemov 		}
1990f3f0e1d2SKirill A. Shutemov 		VM_BUG_ON(nr_none);
199177da9389SMatthew Wilcox 		xas_unlock_irq(&xas);
1992f3f0e1d2SKirill A. Shutemov 
1993f3f0e1d2SKirill A. Shutemov 		new_page->mapping = NULL;
1994f3f0e1d2SKirill A. Shutemov 	}
1995042a3082SHugh Dickins 
1996042a3082SHugh Dickins 	unlock_page(new_page);
1997f3f0e1d2SKirill A. Shutemov out:
1998f3f0e1d2SKirill A. Shutemov 	VM_BUG_ON(!list_empty(&pagelist));
19999d82c694SJohannes Weiner 	if (!IS_ERR_OR_NULL(*hpage))
2000bbc6b703SMatthew Wilcox (Oracle) 		mem_cgroup_uncharge(page_folio(*hpage));
2001f3f0e1d2SKirill A. Shutemov 	/* TODO: tracepoints */
2002f3f0e1d2SKirill A. Shutemov }
2003f3f0e1d2SKirill A. Shutemov 
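/*
 * khugepaged_scan_file - scan HPAGE_PMD_NR page cache slots starting at
 * @start under RCU and, when enough suitable pages are present, call
 * collapse_file() to attempt the collapse.
 */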
2004579c571eSSong Liu static void khugepaged_scan_file(struct mm_struct *mm,
2005579c571eSSong Liu 		struct file *file, pgoff_t start, struct page **hpage)
2006f3f0e1d2SKirill A. Shutemov {
2007f3f0e1d2SKirill A. Shutemov 	struct page *page = NULL;
2008579c571eSSong Liu 	struct address_space *mapping = file->f_mapping;
200985b392dbSMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, start);
2010f3f0e1d2SKirill A. Shutemov 	int present, swap;
2011f3f0e1d2SKirill A. Shutemov 	int node = NUMA_NO_NODE;
2012f3f0e1d2SKirill A. Shutemov 	int result = SCAN_SUCCEED;
2013f3f0e1d2SKirill A. Shutemov 
2014f3f0e1d2SKirill A. Shutemov 	present = 0;
2015f3f0e1d2SKirill A. Shutemov 	swap = 0;
2016f3f0e1d2SKirill A. Shutemov 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2017f3f0e1d2SKirill A. Shutemov 	rcu_read_lock();
201885b392dbSMatthew Wilcox 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
201985b392dbSMatthew Wilcox 		if (xas_retry(&xas, page))
2020f3f0e1d2SKirill A. Shutemov 			continue;
2021f3f0e1d2SKirill A. Shutemov 
202285b392dbSMatthew Wilcox 		if (xa_is_value(page)) {
2023f3f0e1d2SKirill A. Shutemov 			if (++swap > khugepaged_max_ptes_swap) {
2024f3f0e1d2SKirill A. Shutemov 				result = SCAN_EXCEED_SWAP_PTE;
2025e9ea874aSYang Yang 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2026f3f0e1d2SKirill A. Shutemov 				break;
2027f3f0e1d2SKirill A. Shutemov 			}
2028f3f0e1d2SKirill A. Shutemov 			continue;
2029f3f0e1d2SKirill A. Shutemov 		}
2030f3f0e1d2SKirill A. Shutemov 
20316b24ca4aSMatthew Wilcox (Oracle) 		/*
20326b24ca4aSMatthew Wilcox (Oracle) 		 * XXX: khugepaged should compact smaller compound pages
20336b24ca4aSMatthew Wilcox (Oracle) 		 * into a PMD-sized page
20346b24ca4aSMatthew Wilcox (Oracle) 		 */
2035f3f0e1d2SKirill A. Shutemov 		if (PageTransCompound(page)) {
2036f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COMPOUND;
2037f3f0e1d2SKirill A. Shutemov 			break;
2038f3f0e1d2SKirill A. Shutemov 		}
2039f3f0e1d2SKirill A. Shutemov 
2040f3f0e1d2SKirill A. Shutemov 		node = page_to_nid(page);
2041f3f0e1d2SKirill A. Shutemov 		if (khugepaged_scan_abort(node)) {
2042f3f0e1d2SKirill A. Shutemov 			result = SCAN_SCAN_ABORT;
2043f3f0e1d2SKirill A. Shutemov 			break;
2044f3f0e1d2SKirill A. Shutemov 		}
2045f3f0e1d2SKirill A. Shutemov 		khugepaged_node_load[node]++;
2046f3f0e1d2SKirill A. Shutemov 
2047f3f0e1d2SKirill A. Shutemov 		if (!PageLRU(page)) {
2048f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_LRU;
2049f3f0e1d2SKirill A. Shutemov 			break;
2050f3f0e1d2SKirill A. Shutemov 		}
2051f3f0e1d2SKirill A. Shutemov 
205299cb0dbdSSong Liu 		if (page_count(page) !=
205399cb0dbdSSong Liu 		    1 + page_mapcount(page) + page_has_private(page)) {
2054f3f0e1d2SKirill A. Shutemov 			result = SCAN_PAGE_COUNT;
2055f3f0e1d2SKirill A. Shutemov 			break;
2056f3f0e1d2SKirill A. Shutemov 		}
2057f3f0e1d2SKirill A. Shutemov 
2058f3f0e1d2SKirill A. Shutemov 		/*
2059f3f0e1d2SKirill A. Shutemov 		 * We probably should check if the page is referenced here, but
2060f3f0e1d2SKirill A. Shutemov 		 * nobody would transfer pte_young() to PageReferenced() for us.
2061f3f0e1d2SKirill A. Shutemov 		 * And rmap walk here is just too costly...
2062f3f0e1d2SKirill A. Shutemov 		 */
2063f3f0e1d2SKirill A. Shutemov 
2064f3f0e1d2SKirill A. Shutemov 		present++;
2065f3f0e1d2SKirill A. Shutemov 
2066f3f0e1d2SKirill A. Shutemov 		if (need_resched()) {
206785b392dbSMatthew Wilcox 			xas_pause(&xas);
2068f3f0e1d2SKirill A. Shutemov 			cond_resched_rcu();
2069f3f0e1d2SKirill A. Shutemov 		}
2070f3f0e1d2SKirill A. Shutemov 	}
2071f3f0e1d2SKirill A. Shutemov 	rcu_read_unlock();
2072f3f0e1d2SKirill A. Shutemov 
2073f3f0e1d2SKirill A. Shutemov 	if (result == SCAN_SUCCEED) {
2074f3f0e1d2SKirill A. Shutemov 		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2075f3f0e1d2SKirill A. Shutemov 			result = SCAN_EXCEED_NONE_PTE;
2076e9ea874aSYang Yang 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2077f3f0e1d2SKirill A. Shutemov 		} else {
2078f3f0e1d2SKirill A. Shutemov 			node = khugepaged_find_target_node();
2079579c571eSSong Liu 			collapse_file(mm, file, start, hpage, node);
2080f3f0e1d2SKirill A. Shutemov 		}
2081f3f0e1d2SKirill A. Shutemov 	}
2082f3f0e1d2SKirill A. Shutemov 
2083f3f0e1d2SKirill A. Shutemov 	/* TODO: tracepoints */
2084f3f0e1d2SKirill A. Shutemov }
2085f3f0e1d2SKirill A. Shutemov #else
2086579c571eSSong Liu static void khugepaged_scan_file(struct mm_struct *mm,
2087579c571eSSong Liu 		struct file *file, pgoff_t start, struct page **hpage)
2088f3f0e1d2SKirill A. Shutemov {
2089f3f0e1d2SKirill A. Shutemov 	BUILD_BUG();
2090f3f0e1d2SKirill A. Shutemov }
209127e1f827SSong Liu 
20920edf61e5SMiaohe Lin static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
209327e1f827SSong Liu {
209427e1f827SSong Liu }
2095f3f0e1d2SKirill A. Shutemov #endif
2096f3f0e1d2SKirill A. Shutemov 
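/*
 * Added comment: scan up to @pages PTEs in the mm currently at
 * khugepaged_scan.mm_slot (or the head of the list), resuming at
 * khugepaged_scan.address from the previous pass.  khugepaged_mm_lock is
 * dropped while scanning and re-taken before returning; the return value
 * is the progress made, which the caller charges against its
 * pages_to_scan budget.
 */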
2097b46e756fSKirill A. Shutemov static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2098b46e756fSKirill A. Shutemov 					    struct page **hpage)
2099b46e756fSKirill A. Shutemov 	__releases(&khugepaged_mm_lock)
2100b46e756fSKirill A. Shutemov 	__acquires(&khugepaged_mm_lock)
2101b46e756fSKirill A. Shutemov {
2102b46e756fSKirill A. Shutemov 	struct mm_slot *mm_slot;
2103b46e756fSKirill A. Shutemov 	struct mm_struct *mm;
2104b46e756fSKirill A. Shutemov 	struct vm_area_struct *vma;
2105b46e756fSKirill A. Shutemov 	int progress = 0;
2106b46e756fSKirill A. Shutemov 
2107b46e756fSKirill A. Shutemov 	VM_BUG_ON(!pages);
210835f3aa39SLance Roy 	lockdep_assert_held(&khugepaged_mm_lock);
2109b46e756fSKirill A. Shutemov 
2110b46e756fSKirill A. Shutemov 	if (khugepaged_scan.mm_slot)
2111b46e756fSKirill A. Shutemov 		mm_slot = khugepaged_scan.mm_slot;
2112b46e756fSKirill A. Shutemov 	else {
2113b46e756fSKirill A. Shutemov 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2114b46e756fSKirill A. Shutemov 				     struct mm_slot, mm_node);
2115b46e756fSKirill A. Shutemov 		khugepaged_scan.address = 0;
2116b46e756fSKirill A. Shutemov 		khugepaged_scan.mm_slot = mm_slot;
2117b46e756fSKirill A. Shutemov 	}
2118b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
211927e1f827SSong Liu 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2120b46e756fSKirill A. Shutemov 
2121b46e756fSKirill A. Shutemov 	mm = mm_slot->mm;
21223b454ad3SYang Shi 	/*
21233b454ad3SYang Shi 	 * Don't wait for the mmap_lock (to avoid long wait times).  Just move to
21243b454ad3SYang Shi 	 * the next mm on the list.
21253b454ad3SYang Shi 	 */
2126b46e756fSKirill A. Shutemov 	vma = NULL;
2127d8ed45c5SMichel Lespinasse 	if (unlikely(!mmap_read_trylock(mm)))
2128c1e8d7c6SMichel Lespinasse 		goto breakouterloop_mmap_lock;
21293b454ad3SYang Shi 	if (likely(!khugepaged_test_exit(mm)))
2130b46e756fSKirill A. Shutemov 		vma = find_vma(mm, khugepaged_scan.address);
2131b46e756fSKirill A. Shutemov 
2132b46e756fSKirill A. Shutemov 	progress++;
2133b46e756fSKirill A. Shutemov 	for (; vma; vma = vma->vm_next) {
2134b46e756fSKirill A. Shutemov 		unsigned long hstart, hend;
2135b46e756fSKirill A. Shutemov 
2136b46e756fSKirill A. Shutemov 		cond_resched();
2137b46e756fSKirill A. Shutemov 		if (unlikely(khugepaged_test_exit(mm))) {
2138b46e756fSKirill A. Shutemov 			progress++;
2139b46e756fSKirill A. Shutemov 			break;
2140b46e756fSKirill A. Shutemov 		}
214150f8b92fSSong Liu 		if (!hugepage_vma_check(vma, vma->vm_flags)) {
2142b46e756fSKirill A. Shutemov skip:
2143b46e756fSKirill A. Shutemov 			progress++;
2144b46e756fSKirill A. Shutemov 			continue;
2145b46e756fSKirill A. Shutemov 		}
2146b46e756fSKirill A. Shutemov 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2147b46e756fSKirill A. Shutemov 		hend = vma->vm_end & HPAGE_PMD_MASK;
2148b46e756fSKirill A. Shutemov 		if (hstart >= hend)
2149b46e756fSKirill A. Shutemov 			goto skip;
2150b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address > hend)
2151b46e756fSKirill A. Shutemov 			goto skip;
2152b46e756fSKirill A. Shutemov 		if (khugepaged_scan.address < hstart)
2153b46e756fSKirill A. Shutemov 			khugepaged_scan.address = hstart;
2154b46e756fSKirill A. Shutemov 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2155396bcc52SMatthew Wilcox (Oracle) 		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2156396bcc52SMatthew Wilcox (Oracle) 			goto skip;
2157b46e756fSKirill A. Shutemov 
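		/*
		 * Added comment: walk the aligned range one PMD at a time.
		 * File-backed VMAs are scanned by khugepaged_scan_file() with
		 * mmap_lock dropped, anonymous ranges by khugepaged_scan_pmd()
		 * with the lock held; a non-zero ret means mmap_lock was
		 * released, so we break out of the VMA walk below.
		 */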
2158b46e756fSKirill A. Shutemov 		while (khugepaged_scan.address < hend) {
2159b46e756fSKirill A. Shutemov 			int ret;
2160b46e756fSKirill A. Shutemov 			cond_resched();
2161b46e756fSKirill A. Shutemov 			if (unlikely(khugepaged_test_exit(mm)))
2162b46e756fSKirill A. Shutemov 				goto breakouterloop;
2163b46e756fSKirill A. Shutemov 
2164b46e756fSKirill A. Shutemov 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2165b46e756fSKirill A. Shutemov 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2166b46e756fSKirill A. Shutemov 				  hend);
216799cb0dbdSSong Liu 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2168396bcc52SMatthew Wilcox (Oracle) 				struct file *file = get_file(vma->vm_file);
2169f3f0e1d2SKirill A. Shutemov 				pgoff_t pgoff = linear_page_index(vma,
2170f3f0e1d2SKirill A. Shutemov 						khugepaged_scan.address);
217199cb0dbdSSong Liu 
2172d8ed45c5SMichel Lespinasse 				mmap_read_unlock(mm);
2173f3f0e1d2SKirill A. Shutemov 				ret = 1;
2174579c571eSSong Liu 				khugepaged_scan_file(mm, file, pgoff, hpage);
2175f3f0e1d2SKirill A. Shutemov 				fput(file);
2176f3f0e1d2SKirill A. Shutemov 			} else {
2177b46e756fSKirill A. Shutemov 				ret = khugepaged_scan_pmd(mm, vma,
2178b46e756fSKirill A. Shutemov 						khugepaged_scan.address,
2179b46e756fSKirill A. Shutemov 						hpage);
2180f3f0e1d2SKirill A. Shutemov 			}
2181b46e756fSKirill A. Shutemov 			/* move to next address */
2182b46e756fSKirill A. Shutemov 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2183b46e756fSKirill A. Shutemov 			progress += HPAGE_PMD_NR;
2184b46e756fSKirill A. Shutemov 			if (ret)
2185c1e8d7c6SMichel Lespinasse 				/* we released mmap_lock so break loop */
2186c1e8d7c6SMichel Lespinasse 				goto breakouterloop_mmap_lock;
2187b46e756fSKirill A. Shutemov 			if (progress >= pages)
2188b46e756fSKirill A. Shutemov 				goto breakouterloop;
2189b46e756fSKirill A. Shutemov 		}
2190b46e756fSKirill A. Shutemov 	}
2191b46e756fSKirill A. Shutemov breakouterloop:
2192d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2193c1e8d7c6SMichel Lespinasse breakouterloop_mmap_lock:
2194b46e756fSKirill A. Shutemov 
2195b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2196b46e756fSKirill A. Shutemov 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2197b46e756fSKirill A. Shutemov 	/*
2198b46e756fSKirill A. Shutemov 	 * Release the current mm_slot if this mm is about to die, or
2199b46e756fSKirill A. Shutemov 	 * if we scanned all vmas of this mm.
2200b46e756fSKirill A. Shutemov 	 */
2201b46e756fSKirill A. Shutemov 	if (khugepaged_test_exit(mm) || !vma) {
2202b46e756fSKirill A. Shutemov 		/*
2203b46e756fSKirill A. Shutemov 		 * Make sure that if mm_users is reaching zero while
2204b46e756fSKirill A. Shutemov 		 * khugepaged runs here, khugepaged_exit will find
2205b46e756fSKirill A. Shutemov 		 * mm_slot not pointing to the exiting mm.
2206b46e756fSKirill A. Shutemov 		 */
2207b46e756fSKirill A. Shutemov 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2208b46e756fSKirill A. Shutemov 			khugepaged_scan.mm_slot = list_entry(
2209b46e756fSKirill A. Shutemov 				mm_slot->mm_node.next,
2210b46e756fSKirill A. Shutemov 				struct mm_slot, mm_node);
2211b46e756fSKirill A. Shutemov 			khugepaged_scan.address = 0;
2212b46e756fSKirill A. Shutemov 		} else {
2213b46e756fSKirill A. Shutemov 			khugepaged_scan.mm_slot = NULL;
2214b46e756fSKirill A. Shutemov 			khugepaged_full_scans++;
2215b46e756fSKirill A. Shutemov 		}
2216b46e756fSKirill A. Shutemov 
2217b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2218b46e756fSKirill A. Shutemov 	}
2219b46e756fSKirill A. Shutemov 
2220b46e756fSKirill A. Shutemov 	return progress;
2221b46e756fSKirill A. Shutemov }
2222b46e756fSKirill A. Shutemov 
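/*
 * Added comment: there is work to do when at least one mm is registered
 * for scanning and khugepaged is enabled; the wait event additionally
 * fires on kthread_should_stop() so the daemon can exit promptly.
 */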
2223b46e756fSKirill A. Shutemov static int khugepaged_has_work(void)
2224b46e756fSKirill A. Shutemov {
2225b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) &&
2226b46e756fSKirill A. Shutemov 		khugepaged_enabled();
2227b46e756fSKirill A. Shutemov }
2228b46e756fSKirill A. Shutemov 
2229b46e756fSKirill A. Shutemov static int khugepaged_wait_event(void)
2230b46e756fSKirill A. Shutemov {
2231b46e756fSKirill A. Shutemov 	return !list_empty(&khugepaged_scan.mm_head) ||
2232b46e756fSKirill A. Shutemov 		kthread_should_stop();
2233b46e756fSKirill A. Shutemov }
2234b46e756fSKirill A. Shutemov 
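/*
 * Added comment: one scan pass.  Drain the per-CPU LRU caches, then call
 * khugepaged_scan_mm_slot() until pages_to_scan PTEs have been covered, a
 * huge page cannot be preallocated, or the thread is asked to stop or
 * freeze.  pass_through_head bounds the pass to two trips over the mm
 * list.
 */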
2235b46e756fSKirill A. Shutemov static void khugepaged_do_scan(void)
2236b46e756fSKirill A. Shutemov {
2237b46e756fSKirill A. Shutemov 	struct page *hpage = NULL;
2238b46e756fSKirill A. Shutemov 	unsigned int progress = 0, pass_through_head = 0;
223989dc6a96SYanfei Xu 	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2240b46e756fSKirill A. Shutemov 	bool wait = true;
2241b46e756fSKirill A. Shutemov 
2242a980df33SKirill A. Shutemov 	lru_add_drain_all();
2243a980df33SKirill A. Shutemov 
2244b46e756fSKirill A. Shutemov 	while (progress < pages) {
2245b46e756fSKirill A. Shutemov 		if (!khugepaged_prealloc_page(&hpage, &wait))
2246b46e756fSKirill A. Shutemov 			break;
2247b46e756fSKirill A. Shutemov 
2248b46e756fSKirill A. Shutemov 		cond_resched();
2249b46e756fSKirill A. Shutemov 
2250b46e756fSKirill A. Shutemov 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2251b46e756fSKirill A. Shutemov 			break;
2252b46e756fSKirill A. Shutemov 
2253b46e756fSKirill A. Shutemov 		spin_lock(&khugepaged_mm_lock);
2254b46e756fSKirill A. Shutemov 		if (!khugepaged_scan.mm_slot)
2255b46e756fSKirill A. Shutemov 			pass_through_head++;
2256b46e756fSKirill A. Shutemov 		if (khugepaged_has_work() &&
2257b46e756fSKirill A. Shutemov 		    pass_through_head < 2)
2258b46e756fSKirill A. Shutemov 			progress += khugepaged_scan_mm_slot(pages - progress,
2259b46e756fSKirill A. Shutemov 							    &hpage);
2260b46e756fSKirill A. Shutemov 		else
2261b46e756fSKirill A. Shutemov 			progress = pages;
2262b46e756fSKirill A. Shutemov 		spin_unlock(&khugepaged_mm_lock);
2263b46e756fSKirill A. Shutemov 	}
2264b46e756fSKirill A. Shutemov 
2265b46e756fSKirill A. Shutemov 	if (!IS_ERR_OR_NULL(hpage))
2266b46e756fSKirill A. Shutemov 		put_page(hpage);
2267b46e756fSKirill A. Shutemov }
2268b46e756fSKirill A. Shutemov 
2269b46e756fSKirill A. Shutemov static bool khugepaged_should_wakeup(void)
2270b46e756fSKirill A. Shutemov {
2271b46e756fSKirill A. Shutemov 	return kthread_should_stop() ||
2272b46e756fSKirill A. Shutemov 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2273b46e756fSKirill A. Shutemov }
2274b46e756fSKirill A. Shutemov 
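/*
 * Added comment: sleep between passes.  Use a freezable timeout of
 * scan_sleep_millisecs while there is work queued, otherwise an
 * indefinite freezable wait until an mm is registered or the thread is
 * told to stop.
 */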
2275b46e756fSKirill A. Shutemov static void khugepaged_wait_work(void)
2276b46e756fSKirill A. Shutemov {
2277b46e756fSKirill A. Shutemov 	if (khugepaged_has_work()) {
2278b46e756fSKirill A. Shutemov 		const unsigned long scan_sleep_jiffies =
2279b46e756fSKirill A. Shutemov 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2280b46e756fSKirill A. Shutemov 
2281b46e756fSKirill A. Shutemov 		if (!scan_sleep_jiffies)
2282b46e756fSKirill A. Shutemov 			return;
2283b46e756fSKirill A. Shutemov 
2284b46e756fSKirill A. Shutemov 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2285b46e756fSKirill A. Shutemov 		wait_event_freezable_timeout(khugepaged_wait,
2286b46e756fSKirill A. Shutemov 					     khugepaged_should_wakeup(),
2287b46e756fSKirill A. Shutemov 					     scan_sleep_jiffies);
2288b46e756fSKirill A. Shutemov 		return;
2289b46e756fSKirill A. Shutemov 	}
2290b46e756fSKirill A. Shutemov 
2291b46e756fSKirill A. Shutemov 	if (khugepaged_enabled())
2292b46e756fSKirill A. Shutemov 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2293b46e756fSKirill A. Shutemov }
2294b46e756fSKirill A. Shutemov 
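/*
 * Added comment: main loop of the khugepaged kernel thread.  Alternate
 * scan passes with sleeps until kthread_stop(), then drop the current
 * mm_slot so no stale reference to an exiting mm is left behind.
 */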
2295b46e756fSKirill A. Shutemov static int khugepaged(void *none)
2296b46e756fSKirill A. Shutemov {
2297b46e756fSKirill A. Shutemov 	struct mm_slot *mm_slot;
2298b46e756fSKirill A. Shutemov 
2299b46e756fSKirill A. Shutemov 	set_freezable();
2300b46e756fSKirill A. Shutemov 	set_user_nice(current, MAX_NICE);
2301b46e756fSKirill A. Shutemov 
2302b46e756fSKirill A. Shutemov 	while (!kthread_should_stop()) {
2303b46e756fSKirill A. Shutemov 		khugepaged_do_scan();
2304b46e756fSKirill A. Shutemov 		khugepaged_wait_work();
2305b46e756fSKirill A. Shutemov 	}
2306b46e756fSKirill A. Shutemov 
2307b46e756fSKirill A. Shutemov 	spin_lock(&khugepaged_mm_lock);
2308b46e756fSKirill A. Shutemov 	mm_slot = khugepaged_scan.mm_slot;
2309b46e756fSKirill A. Shutemov 	khugepaged_scan.mm_slot = NULL;
2310b46e756fSKirill A. Shutemov 	if (mm_slot)
2311b46e756fSKirill A. Shutemov 		collect_mm_slot(mm_slot);
2312b46e756fSKirill A. Shutemov 	spin_unlock(&khugepaged_mm_lock);
2313b46e756fSKirill A. Shutemov 	return 0;
2314b46e756fSKirill A. Shutemov }
2315b46e756fSKirill A. Shutemov 
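/*
 * Added comment: raise min_free_kbytes so that every populated zone below
 * ZONE_MOVABLE keeps a couple of pageblocks free of mixed migratetypes,
 * which lowers the chance that huge page allocations fragment memory.  As
 * a rough example, with 2MiB pageblocks and MIGRATE_PCPTYPES == 3 this
 * asks for (2 + 3 * 3) * 2MiB = 22MiB per qualifying zone, capped at 5%
 * of lowmem.  When khugepaged is disabled, the default min_free_kbytes is
 * restored instead.
 */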
2316b46e756fSKirill A. Shutemov static void set_recommended_min_free_kbytes(void)
2317b46e756fSKirill A. Shutemov {
2318b46e756fSKirill A. Shutemov 	struct zone *zone;
2319b46e756fSKirill A. Shutemov 	int nr_zones = 0;
2320b46e756fSKirill A. Shutemov 	unsigned long recommended_min;
2321b46e756fSKirill A. Shutemov 
2322bd3400eaSLiangcai Fan 	if (!khugepaged_enabled()) {
2323bd3400eaSLiangcai Fan 		calculate_min_free_kbytes();
2324bd3400eaSLiangcai Fan 		goto update_wmarks;
2325bd3400eaSLiangcai Fan 	}
2326bd3400eaSLiangcai Fan 
2327b7d349c7SJoonsoo Kim 	for_each_populated_zone(zone) {
2328b7d349c7SJoonsoo Kim 		/*
2329b7d349c7SJoonsoo Kim 		 * We don't need to worry about fragmentation of
2330b7d349c7SJoonsoo Kim 		 * ZONE_MOVABLE since it only has movable pages.
2331b7d349c7SJoonsoo Kim 		 */
2332b7d349c7SJoonsoo Kim 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2333b7d349c7SJoonsoo Kim 			continue;
2334b7d349c7SJoonsoo Kim 
2335b46e756fSKirill A. Shutemov 		nr_zones++;
2336b7d349c7SJoonsoo Kim 	}
2337b46e756fSKirill A. Shutemov 
2338b46e756fSKirill A. Shutemov 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2339b46e756fSKirill A. Shutemov 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2340b46e756fSKirill A. Shutemov 
2341b46e756fSKirill A. Shutemov 	/*
2342b46e756fSKirill A. Shutemov 	 * Make sure that on average at least two pageblocks are almost free
2343b46e756fSKirill A. Shutemov 	 * of another type, one for a migratetype to fall back to and a
2344b46e756fSKirill A. Shutemov 	 * second to avoid subsequent fallbacks of other types. There are 3
2345b46e756fSKirill A. Shutemov 	 * MIGRATE_TYPES we care about.
2346b46e756fSKirill A. Shutemov 	 */
2347b46e756fSKirill A. Shutemov 	recommended_min += pageblock_nr_pages * nr_zones *
2348b46e756fSKirill A. Shutemov 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2349b46e756fSKirill A. Shutemov 
2350b46e756fSKirill A. Shutemov 	/* never allow more than 5% of lowmem to be reserved */
2351b46e756fSKirill A. Shutemov 	recommended_min = min(recommended_min,
2352b46e756fSKirill A. Shutemov 			      (unsigned long) nr_free_buffer_pages() / 20);
2353b46e756fSKirill A. Shutemov 	recommended_min <<= (PAGE_SHIFT-10);
2354b46e756fSKirill A. Shutemov 
2355b46e756fSKirill A. Shutemov 	if (recommended_min > min_free_kbytes) {
2356b46e756fSKirill A. Shutemov 		if (user_min_free_kbytes >= 0)
2357b46e756fSKirill A. Shutemov 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2358b46e756fSKirill A. Shutemov 				min_free_kbytes, recommended_min);
2359b46e756fSKirill A. Shutemov 
2360b46e756fSKirill A. Shutemov 		min_free_kbytes = recommended_min;
2361b46e756fSKirill A. Shutemov 	}
2362bd3400eaSLiangcai Fan 
2363bd3400eaSLiangcai Fan update_wmarks:
2364b46e756fSKirill A. Shutemov 	setup_per_zone_wmarks();
2365b46e756fSKirill A. Shutemov }
2366b46e756fSKirill A. Shutemov 
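/*
 * Added comment: start or stop the khugepaged thread to match the current
 * THP enablement (called at boot and when the sysfs knobs change), and
 * recompute the min_free_kbytes reservation either way.
 */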
2367b46e756fSKirill A. Shutemov int start_stop_khugepaged(void)
2368b46e756fSKirill A. Shutemov {
2369b46e756fSKirill A. Shutemov 	int err = 0;
2370b46e756fSKirill A. Shutemov 
2371b46e756fSKirill A. Shutemov 	mutex_lock(&khugepaged_mutex);
2372b46e756fSKirill A. Shutemov 	if (khugepaged_enabled()) {
2373b46e756fSKirill A. Shutemov 		if (!khugepaged_thread)
2374b46e756fSKirill A. Shutemov 			khugepaged_thread = kthread_run(khugepaged, NULL,
2375b46e756fSKirill A. Shutemov 							"khugepaged");
2376b46e756fSKirill A. Shutemov 		if (IS_ERR(khugepaged_thread)) {
2377b46e756fSKirill A. Shutemov 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2378b46e756fSKirill A. Shutemov 			err = PTR_ERR(khugepaged_thread);
2379b46e756fSKirill A. Shutemov 			khugepaged_thread = NULL;
2380b46e756fSKirill A. Shutemov 			goto fail;
2381b46e756fSKirill A. Shutemov 		}
2382b46e756fSKirill A. Shutemov 
2383b46e756fSKirill A. Shutemov 		if (!list_empty(&khugepaged_scan.mm_head))
2384b46e756fSKirill A. Shutemov 			wake_up_interruptible(&khugepaged_wait);
2385b46e756fSKirill A. Shutemov 	} else if (khugepaged_thread) {
2386b46e756fSKirill A. Shutemov 		kthread_stop(khugepaged_thread);
2387b46e756fSKirill A. Shutemov 		khugepaged_thread = NULL;
2388b46e756fSKirill A. Shutemov 	}
2389bd3400eaSLiangcai Fan 	set_recommended_min_free_kbytes();
2390b46e756fSKirill A. Shutemov fail:
2391b46e756fSKirill A. Shutemov 	mutex_unlock(&khugepaged_mutex);
2392b46e756fSKirill A. Shutemov 	return err;
2393b46e756fSKirill A. Shutemov }
23944aab2be0SVijay Balakrishna 
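/*
 * Added comment: re-apply the khugepaged watermark adjustment after the
 * zone layout changes (e.g. on memory hotplug), but only while the daemon
 * is actually running.
 */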
23954aab2be0SVijay Balakrishna void khugepaged_min_free_kbytes_update(void)
23964aab2be0SVijay Balakrishna {
23974aab2be0SVijay Balakrishna 	mutex_lock(&khugepaged_mutex);
23984aab2be0SVijay Balakrishna 	if (khugepaged_enabled() && khugepaged_thread)
23994aab2be0SVijay Balakrishna 		set_recommended_min_free_kbytes();
24004aab2be0SVijay Balakrishna 	mutex_unlock(&khugepaged_mutex);
24014aab2be0SVijay Balakrishna }
2402