// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

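/*
 * Mark the pageblock containing @page MIGRATE_ISOLATE and move its free pages
 * to the MIGRATE_ISOLATE freelist.  Returns 0 on success, or -EBUSY if the
 * pageblock is already isolated, if the isolation notifier objects, or if the
 * block contains pages that cannot be moved away.
 */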
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.  Return -EBUSY
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or are on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, the
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
				 isol_flags))
		ret = 0;

	/*
	 * "Immobile" here means pages that are not on the LRU. If there are
	 * more immobile pages than removable-by-driver pages reported by the
	 * notifier, we fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
									NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}

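/*
 * Revert an isolated pageblock to @migratetype and move its free pages back
 * to the matching freelist.  A free page of pageblock_order or larger is
 * isolated and freed again so that it can merge with its buddy.
 */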
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with more than pageblock_order on an isolated
	 * pageblock is not allowed to merge (due to the freepage counting
	 * problem), there may be an unmerged free buddy page here.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and freeing it again makes the buddies
	 * merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of more than pageblock_order above,
	 * there should be no other free pages in the range, so we can skip
	 * the costly pageblock scan that move_freepages_block() would do.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}

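/*
 * Return the first page in the nr_pages page frames starting at @pfn that is
 * both valid and online, or NULL if there is no such page.
 */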
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		if (!pfn_valid_within(pfn + i))
			continue;
		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			SKIP_HWPOISON - ignore hwpoison pages
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in the
 * range will never be allocated. Any free pages and pages freed in the future
 * will not be allocated again. If the specified range includes migrate types
 * other than MOVABLE or CMA, this will fail with -EBUSY. To finally isolate
 * all pages in the range, the caller has to free all pages in the range.
 * test_pages_isolated() can be used to test this.
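 *
 * A typical caller (alloc_contig_range() is one example) roughly follows
 * this sequence; this is an illustrative sketch, not a verbatim copy of any
 * caller:
 *
 *	start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, 0);
 *	... migrate or reclaim everything still in use in the range ...
 *	if (!test_pages_isolated(start_pfn, end_pfn, false))
 *		... the range is now entirely free and stays unallocated ...
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);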
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This is detected in set_migratetype_isolate(), which then returns an
 * error. We clean up by restoring the migration type on pageblocks we may
 * have modified and return -EBUSY to the caller. This prevents two threads
 * from simultaneously working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success and -EBUSY if any part
 * of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that with a VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
	 * not necessarily aligned to pageblock_nr_pages.
	 * Therefore we just check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

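/*
 * Allocation callback for migrate_pages(): allocate a replacement page for
 * @page, preferably on the current node.
 */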
struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}