xref: /linux/mm/page_isolation.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

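/*
 * zone->nr_pageblock_isolate tracks how many pageblocks in this zone are
 * currently MIGRATE_ISOLATE; the two helpers below update it under
 * zone->lock. Its consumers can use it to compensate for free pages that
 * are isolated and therefore not allocatable (e.g. in watermark checks).
 */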
/* called while holding zone->lock */
static void set_pageblock_isolate(struct page *page)
{
	if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
		return;

	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
	page_zone(page)->nr_pageblock_isolate++;
}

/* called while holding zone->lock */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
	struct zone *zone = page_zone(page);
	if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
		return;

	BUG_ON(zone->nr_pageblock_isolate <= 0);
	set_pageblock_migratetype(page, migratetype);
	zone->nr_pageblock_isolate--;
}

int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * these pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier
	 * chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug does not call shrink_slab() by itself
	 * yet, so we only check movable pages here.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * "Immobile" means not-on-LRU pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we fail with -EBUSY.
	 */

out:
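	/*
	 * On success, mark the whole pageblock MIGRATE_ISOLATE, move its
	 * free pages onto the isolate free list and subtract them from
	 * NR_FREE_PAGES so watermark checks no longer count them as
	 * allocatable.
	 */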
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_isolate(page);
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
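	/*
	 * Pages from this pageblock may still sit on per-cpu free lists,
	 * from which they could be allocated even though the pageblock
	 * is now MIGRATE_ISOLATE. Drain them back into the buddy
	 * allocator's (isolated) free lists.
	 */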
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
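	/*
	 * Move the free pages of this pageblock back to the free list of
	 * the target migratetype and add them back to NR_FREE_PAGES,
	 * from which they were subtracted when the block was isolated.
	 */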
	nr_pages = move_freepages_block(zone, page, migratetype);
	__mod_zone_freepage_state(zone, nr_pages, migratetype);
	restore_pageblock_isolate(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

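/*
 * Return the first valid page in [pfn, pfn + nr_pages), or NULL if the
 * whole range is invalid. pfn_valid_within() only checks individual
 * pfns when CONFIG_HOLES_IN_ZONE is enabled; otherwise it is constant 1
 * and the first page of the range is returned directly.
 */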
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a
 * range of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Setting the page-allocation-type to MIGRATE_ISOLATE means that free
 * pages in the range will never be allocated. Any free pages and pages
 * freed in the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
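	/*
	 * Isolation of one pageblock failed: roll back by un-isolating
	 * every pageblock that was isolated before undo_pfn.
	 */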
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn, end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

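	/*
	 * Scan the range. Pages in the buddy allocator are skipped one
	 * whole free chunk at a time; freed pages that are not (yet) in
	 * the buddy structure are tolerated if their freepage
	 * migratetype is already MIGRATE_ISOLATE. Anything else (except,
	 * optionally, hwpoisoned pages) stops the scan early.
	 */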
	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If a race between isolation and allocation
			 * happens, some free pages could be on the
			 * MIGRATE_MOVABLE list even though the
			 * pageblock's migratetype is MIGRATE_ISOLATE.
			 * Catch it and move the pages onto the
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
						MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		} else if (page_count(page) == 0 &&
			   get_freepage_migratetype(page) == MIGRATE_ISOLATE) {
			pfn += 1;
		} else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
			/*
			 * The HWPoisoned page may not be in the buddy
			 * system, and page_count() is not 0.
			 */
			pfn++;
			continue;
		} else {
			break;
		}
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages is not necessarily equal to
	 * MAX_ORDER_NR_PAGES, so a chunk of free pages is not
	 * necessarily aligned to pageblock_nr_pages. We therefore check
	 * the pageblock migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are either free or marked MIGRATE_ISOLATE */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}

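/*
 * Allocation callback for migrate_pages(), used when draining pages out
 * of an isolated range (e.g. by alloc_contig_range()): allocate a page
 * that the contents of an isolated page can be migrated to. @private
 * and @resultp are required by the callback signature but unused here.
 */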
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}