xref: /linux/mm/compaction.c (revision 010fc29a45a2e8dbc08bf45ef80b8622619aaae0)
1748446bbSMel Gorman /*
2748446bbSMel Gorman  * linux/mm/compaction.c
3748446bbSMel Gorman  *
4748446bbSMel Gorman  * Memory compaction for the reduction of external fragmentation. Note that
5748446bbSMel Gorman  * this heavily depends upon page migration to do all the real heavy
6748446bbSMel Gorman  * lifting.
7748446bbSMel Gorman  *
8748446bbSMel Gorman  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9748446bbSMel Gorman  */
10748446bbSMel Gorman #include <linux/swap.h>
11748446bbSMel Gorman #include <linux/migrate.h>
12748446bbSMel Gorman #include <linux/compaction.h>
13748446bbSMel Gorman #include <linux/mm_inline.h>
14748446bbSMel Gorman #include <linux/backing-dev.h>
1576ab0f53SMel Gorman #include <linux/sysctl.h>
16ed4a6d7fSMel Gorman #include <linux/sysfs.h>
17bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h>
18748446bbSMel Gorman #include "internal.h"
19748446bbSMel Gorman 
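/*
 * The compaction vm_event counters only exist when CONFIG_COMPACTION is
 * enabled; these wrappers compile away so that CMA-only builds still build.
 */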
20*010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION
21*010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item)
22*010fc29aSMinchan Kim {
23*010fc29aSMinchan Kim 	count_vm_event(item);
24*010fc29aSMinchan Kim }
25*010fc29aSMinchan Kim 
26*010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta)
27*010fc29aSMinchan Kim {
28*010fc29aSMinchan Kim 	count_vm_events(item, delta);
29*010fc29aSMinchan Kim }
30*010fc29aSMinchan Kim #else
31*010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0)
32*010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0)
33*010fc29aSMinchan Kim #endif
34*010fc29aSMinchan Kim 
35ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA
36ff9543fdSMichal Nazarewicz 
37b7aba698SMel Gorman #define CREATE_TRACE_POINTS
38b7aba698SMel Gorman #include <trace/events/compaction.h>
39b7aba698SMel Gorman 
40748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist)
41748446bbSMel Gorman {
42748446bbSMel Gorman 	struct page *page, *next;
43748446bbSMel Gorman 	unsigned long count = 0;
44748446bbSMel Gorman 
45748446bbSMel Gorman 	list_for_each_entry_safe(page, next, freelist, lru) {
46748446bbSMel Gorman 		list_del(&page->lru);
47748446bbSMel Gorman 		__free_page(page);
48748446bbSMel Gorman 		count++;
49748446bbSMel Gorman 	}
50748446bbSMel Gorman 
51748446bbSMel Gorman 	return count;
52748446bbSMel Gorman }
53748446bbSMel Gorman 
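/*
 * split_free_page() does not prepare the freed pages for use; run the arch
 * hook and the debug-pagealloc mapping for each isolated page here.
 */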
54ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list)
55ff9543fdSMichal Nazarewicz {
56ff9543fdSMichal Nazarewicz 	struct page *page;
57ff9543fdSMichal Nazarewicz 
58ff9543fdSMichal Nazarewicz 	list_for_each_entry(page, list, lru) {
59ff9543fdSMichal Nazarewicz 		arch_alloc_page(page, 0);
60ff9543fdSMichal Nazarewicz 		kernel_map_pages(page, 1, 1);
61ff9543fdSMichal Nazarewicz 	}
62ff9543fdSMichal Nazarewicz }
63ff9543fdSMichal Nazarewicz 
6447118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype)
6547118af0SMichal Nazarewicz {
6647118af0SMichal Nazarewicz 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
6747118af0SMichal Nazarewicz }
6847118af0SMichal Nazarewicz 
69bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION
70bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. */
71bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
72bb13ffebSMel Gorman 					struct page *page)
73bb13ffebSMel Gorman {
74bb13ffebSMel Gorman 	if (cc->ignore_skip_hint)
75bb13ffebSMel Gorman 		return true;
76bb13ffebSMel Gorman 
77bb13ffebSMel Gorman 	return !get_pageblock_skip(page);
78bb13ffebSMel Gorman }
79bb13ffebSMel Gorman 
80bb13ffebSMel Gorman /*
81bb13ffebSMel Gorman  * This function is called to clear all cached information on pageblocks that
82bb13ffebSMel Gorman  * should be skipped for page isolation when the migrate and free page scanner
83bb13ffebSMel Gorman  * meet.
84bb13ffebSMel Gorman  */
8562997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone)
86bb13ffebSMel Gorman {
87bb13ffebSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
88bb13ffebSMel Gorman 	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
89bb13ffebSMel Gorman 	unsigned long pfn;
90bb13ffebSMel Gorman 
91c89511abSMel Gorman 	zone->compact_cached_migrate_pfn = start_pfn;
92c89511abSMel Gorman 	zone->compact_cached_free_pfn = end_pfn;
9362997027SMel Gorman 	zone->compact_blockskip_flush = false;
94bb13ffebSMel Gorman 
95bb13ffebSMel Gorman 	/* Walk the zone and mark every pageblock as suitable for isolation */
96bb13ffebSMel Gorman 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
97bb13ffebSMel Gorman 		struct page *page;
98bb13ffebSMel Gorman 
99bb13ffebSMel Gorman 		cond_resched();
100bb13ffebSMel Gorman 
101bb13ffebSMel Gorman 		if (!pfn_valid(pfn))
102bb13ffebSMel Gorman 			continue;
103bb13ffebSMel Gorman 
104bb13ffebSMel Gorman 		page = pfn_to_page(pfn);
105bb13ffebSMel Gorman 		if (zone != page_zone(page))
106bb13ffebSMel Gorman 			continue;
107bb13ffebSMel Gorman 
108bb13ffebSMel Gorman 		clear_pageblock_skip(page);
109bb13ffebSMel Gorman 	}
110bb13ffebSMel Gorman }
111bb13ffebSMel Gorman 
11262997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat)
11362997027SMel Gorman {
11462997027SMel Gorman 	int zoneid;
11562997027SMel Gorman 
11662997027SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
11762997027SMel Gorman 		struct zone *zone = &pgdat->node_zones[zoneid];
11862997027SMel Gorman 		if (!populated_zone(zone))
11962997027SMel Gorman 			continue;
12062997027SMel Gorman 
12162997027SMel Gorman 		/* Only flush if a full compaction finished recently */
12262997027SMel Gorman 		if (zone->compact_blockskip_flush)
12362997027SMel Gorman 			__reset_isolation_suitable(zone);
12462997027SMel Gorman 	}
12562997027SMel Gorman }
12662997027SMel Gorman 
127bb13ffebSMel Gorman /*
128bb13ffebSMel Gorman  * If no pages were isolated then mark this pageblock to be skipped in the
12962997027SMel Gorman  * future. The information is later cleared by __reset_isolation_suitable().
130bb13ffebSMel Gorman  */
131c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
132c89511abSMel Gorman 			struct page *page, unsigned long nr_isolated,
133c89511abSMel Gorman 			bool migrate_scanner)
134bb13ffebSMel Gorman {
135c89511abSMel Gorman 	struct zone *zone = cc->zone;
136bb13ffebSMel Gorman 	if (!page)
137bb13ffebSMel Gorman 		return;
138bb13ffebSMel Gorman 
139c89511abSMel Gorman 	if (!nr_isolated) {
140c89511abSMel Gorman 		unsigned long pfn = page_to_pfn(page);
141bb13ffebSMel Gorman 		set_pageblock_skip(page);
142c89511abSMel Gorman 
143c89511abSMel Gorman 		/* Update where compaction should restart */
144c89511abSMel Gorman 		if (migrate_scanner) {
145c89511abSMel Gorman 			if (!cc->finished_update_migrate &&
146c89511abSMel Gorman 			    pfn > zone->compact_cached_migrate_pfn)
147c89511abSMel Gorman 				zone->compact_cached_migrate_pfn = pfn;
148c89511abSMel Gorman 		} else {
149c89511abSMel Gorman 			if (!cc->finished_update_free &&
150c89511abSMel Gorman 			    pfn < zone->compact_cached_free_pfn)
151c89511abSMel Gorman 				zone->compact_cached_free_pfn = pfn;
152c89511abSMel Gorman 		}
153c89511abSMel Gorman 	}
154bb13ffebSMel Gorman }
155bb13ffebSMel Gorman #else
156bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc,
157bb13ffebSMel Gorman 					struct page *page)
158bb13ffebSMel Gorman {
159bb13ffebSMel Gorman 	return true;
160bb13ffebSMel Gorman }
161bb13ffebSMel Gorman 
162c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc,
163c89511abSMel Gorman 			struct page *page, unsigned long nr_isolated,
164c89511abSMel Gorman 			bool migrate_scanner)
165bb13ffebSMel Gorman {
166bb13ffebSMel Gorman }
167bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */
168bb13ffebSMel Gorman 
1692a1402aaSMel Gorman static inline bool should_release_lock(spinlock_t *lock)
1702a1402aaSMel Gorman {
1712a1402aaSMel Gorman 	return need_resched() || spin_is_contended(lock);
1722a1402aaSMel Gorman }
1732a1402aaSMel Gorman 
17485aa125fSMichal Nazarewicz /*
175c67fe375SMel Gorman  * Compaction requires the taking of some coarse locks that are potentially
176c67fe375SMel Gorman  * very heavily contended. Check if the process needs to be scheduled or
177c67fe375SMel Gorman  * if the lock is contended. For async compaction, back out in the event
178c67fe375SMel Gorman  * of severe contention. For sync compaction, schedule.
179c67fe375SMel Gorman  *
180c67fe375SMel Gorman  * Returns true if the lock is held.
181c67fe375SMel Gorman  * Returns false if the lock is released and compaction should abort
182c67fe375SMel Gorman  */
183c67fe375SMel Gorman static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
184c67fe375SMel Gorman 				      bool locked, struct compact_control *cc)
185c67fe375SMel Gorman {
1862a1402aaSMel Gorman 	if (should_release_lock(lock)) {
187c67fe375SMel Gorman 		if (locked) {
188c67fe375SMel Gorman 			spin_unlock_irqrestore(lock, *flags);
189c67fe375SMel Gorman 			locked = false;
190c67fe375SMel Gorman 		}
191c67fe375SMel Gorman 
192c67fe375SMel Gorman 		/* async aborts if taking too long or contended */
193c67fe375SMel Gorman 		if (!cc->sync) {
194e64c5237SShaohua Li 			cc->contended = true;
195c67fe375SMel Gorman 			return false;
196c67fe375SMel Gorman 		}
197c67fe375SMel Gorman 
198c67fe375SMel Gorman 		cond_resched();
199c67fe375SMel Gorman 	}
200c67fe375SMel Gorman 
201c67fe375SMel Gorman 	if (!locked)
202c67fe375SMel Gorman 		spin_lock_irqsave(lock, *flags);
203c67fe375SMel Gorman 	return true;
204c67fe375SMel Gorman }
205c67fe375SMel Gorman 
206c67fe375SMel Gorman static inline bool compact_trylock_irqsave(spinlock_t *lock,
207c67fe375SMel Gorman 			unsigned long *flags, struct compact_control *cc)
208c67fe375SMel Gorman {
209c67fe375SMel Gorman 	return compact_checklock_irqsave(lock, flags, false, cc);
210c67fe375SMel Gorman }
211c67fe375SMel Gorman 
212f40d1e42SMel Gorman /* Returns true if the page is within a block suitable for migration to */
213f40d1e42SMel Gorman static bool suitable_migration_target(struct page *page)
214f40d1e42SMel Gorman {
215f40d1e42SMel Gorman 	int migratetype = get_pageblock_migratetype(page);
216f40d1e42SMel Gorman 
217f40d1e42SMel Gorman 	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
218f40d1e42SMel Gorman 	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
219f40d1e42SMel Gorman 		return false;
220f40d1e42SMel Gorman 
221f40d1e42SMel Gorman 	/* If the page is a large free page, then allow migration */
222f40d1e42SMel Gorman 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
223f40d1e42SMel Gorman 		return true;
224f40d1e42SMel Gorman 
225f40d1e42SMel Gorman 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
226f40d1e42SMel Gorman 	if (migrate_async_suitable(migratetype))
227f40d1e42SMel Gorman 		return true;
228f40d1e42SMel Gorman 
229f40d1e42SMel Gorman 	/* Otherwise skip the block */
230f40d1e42SMel Gorman 	return false;
231f40d1e42SMel Gorman }
232f40d1e42SMel Gorman 
233c67fe375SMel Gorman /*
23485aa125fSMichal Nazarewicz  * Isolate free pages onto a private freelist; zone->lock is taken internally.
23585aa125fSMichal Nazarewicz  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
23685aa125fSMichal Nazarewicz  * pages inside of the pageblock (even though it may still end up isolating
23785aa125fSMichal Nazarewicz  * some pages).
23885aa125fSMichal Nazarewicz  */
239f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc,
240f40d1e42SMel Gorman 				unsigned long blockpfn,
24185aa125fSMichal Nazarewicz 				unsigned long end_pfn,
24285aa125fSMichal Nazarewicz 				struct list_head *freelist,
24385aa125fSMichal Nazarewicz 				bool strict)
244748446bbSMel Gorman {
245b7aba698SMel Gorman 	int nr_scanned = 0, total_isolated = 0;
246bb13ffebSMel Gorman 	struct page *cursor, *valid_page = NULL;
247f40d1e42SMel Gorman 	unsigned long nr_strict_required = end_pfn - blockpfn;
248f40d1e42SMel Gorman 	unsigned long flags;
249f40d1e42SMel Gorman 	bool locked = false;
250748446bbSMel Gorman 
251748446bbSMel Gorman 	cursor = pfn_to_page(blockpfn);
252748446bbSMel Gorman 
253f40d1e42SMel Gorman 	/* Isolate free pages. */
254748446bbSMel Gorman 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
255748446bbSMel Gorman 		int isolated, i;
256748446bbSMel Gorman 		struct page *page = cursor;
257748446bbSMel Gorman 
258b7aba698SMel Gorman 		nr_scanned++;
259f40d1e42SMel Gorman 		if (!pfn_valid_within(blockpfn))
260748446bbSMel Gorman 			continue;
261bb13ffebSMel Gorman 		if (!valid_page)
262bb13ffebSMel Gorman 			valid_page = page;
263f40d1e42SMel Gorman 		if (!PageBuddy(page))
264f40d1e42SMel Gorman 			continue;
265f40d1e42SMel Gorman 
266f40d1e42SMel Gorman 		/*
267f40d1e42SMel Gorman 		 * The zone lock must be held to isolate freepages.
268f40d1e42SMel Gorman 		 * Unfortunately this is a very coarse lock and can be
269f40d1e42SMel Gorman 		 * heavily contended if there are parallel allocations
270f40d1e42SMel Gorman 		 * or parallel compactions. For async compaction do not
271f40d1e42SMel Gorman 		 * spin on the lock and we acquire the lock as late as
272f40d1e42SMel Gorman 		 * possible.
273f40d1e42SMel Gorman 		 */
274f40d1e42SMel Gorman 		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
275f40d1e42SMel Gorman 								locked, cc);
276f40d1e42SMel Gorman 		if (!locked)
277f40d1e42SMel Gorman 			break;
278f40d1e42SMel Gorman 
279f40d1e42SMel Gorman 		/* Recheck this is a suitable migration target under lock */
280f40d1e42SMel Gorman 		if (!strict && !suitable_migration_target(page))
281f40d1e42SMel Gorman 			break;
282f40d1e42SMel Gorman 
283f40d1e42SMel Gorman 		/* Recheck this is a buddy page under lock */
284f40d1e42SMel Gorman 		if (!PageBuddy(page))
285f40d1e42SMel Gorman 			continue;
286748446bbSMel Gorman 
287748446bbSMel Gorman 		/* Found a free page, break it into order-0 pages */
288748446bbSMel Gorman 		isolated = split_free_page(page);
28985aa125fSMichal Nazarewicz 		if (!isolated && strict)
290f40d1e42SMel Gorman 			break;
291748446bbSMel Gorman 		total_isolated += isolated;
292748446bbSMel Gorman 		for (i = 0; i < isolated; i++) {
293748446bbSMel Gorman 			list_add(&page->lru, freelist);
294748446bbSMel Gorman 			page++;
295748446bbSMel Gorman 		}
296748446bbSMel Gorman 
297748446bbSMel Gorman 		/* If a page was split, advance to the end of it */
298748446bbSMel Gorman 		if (isolated) {
299748446bbSMel Gorman 			blockpfn += isolated - 1;
300748446bbSMel Gorman 			cursor += isolated - 1;
301748446bbSMel Gorman 		}
302748446bbSMel Gorman 	}
303748446bbSMel Gorman 
304b7aba698SMel Gorman 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
305f40d1e42SMel Gorman 
306f40d1e42SMel Gorman 	/*
307f40d1e42SMel Gorman 	 * If strict isolation is requested by CMA then check that all the
308f40d1e42SMel Gorman 	 * pages requested were isolated. If there were any failures, 0 is
309f40d1e42SMel Gorman 	 * returned and CMA will fail.
310f40d1e42SMel Gorman 	 */
3110db63d7eSMel Gorman 	if (strict && nr_strict_required > total_isolated)
312f40d1e42SMel Gorman 		total_isolated = 0;
313f40d1e42SMel Gorman 
314f40d1e42SMel Gorman 	if (locked)
315f40d1e42SMel Gorman 		spin_unlock_irqrestore(&cc->zone->lock, flags);
316f40d1e42SMel Gorman 
317bb13ffebSMel Gorman 	/* Update the pageblock-skip if the whole pageblock was scanned */
318bb13ffebSMel Gorman 	if (blockpfn == end_pfn)
319c89511abSMel Gorman 		update_pageblock_skip(cc, valid_page, total_isolated, false);
320bb13ffebSMel Gorman 
321*010fc29aSMinchan Kim 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
322397487dbSMel Gorman 	if (total_isolated)
323*010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, total_isolated);
324748446bbSMel Gorman 	return total_isolated;
325748446bbSMel Gorman }
326748446bbSMel Gorman 
32785aa125fSMichal Nazarewicz /**
32885aa125fSMichal Nazarewicz  * isolate_freepages_range() - isolate free pages.
32985aa125fSMichal Nazarewicz  * @start_pfn: The first PFN to start isolating.
33085aa125fSMichal Nazarewicz  * @end_pfn:   The one-past-last PFN.
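 * @cc:        Compaction control structure (supplies the zone being scanned).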
33185aa125fSMichal Nazarewicz  *
33285aa125fSMichal Nazarewicz  * Non-free pages, invalid PFNs, or zone boundaries within the
33385aa125fSMichal Nazarewicz  * [start_pfn, end_pfn) range are considered errors and cause the function to
33485aa125fSMichal Nazarewicz  * undo its actions and return zero.
33585aa125fSMichal Nazarewicz  *
33685aa125fSMichal Nazarewicz  * Otherwise, the function returns the one-past-the-last PFN of the isolated
33785aa125fSMichal Nazarewicz  * pages (which may be greater than end_pfn if the end fell in the middle of
33885aa125fSMichal Nazarewicz  * a free page).
33985aa125fSMichal Nazarewicz  */
340ff9543fdSMichal Nazarewicz unsigned long
341bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc,
342bb13ffebSMel Gorman 			unsigned long start_pfn, unsigned long end_pfn)
34385aa125fSMichal Nazarewicz {
344f40d1e42SMel Gorman 	unsigned long isolated, pfn, block_end_pfn;
34585aa125fSMichal Nazarewicz 	LIST_HEAD(freelist);
34685aa125fSMichal Nazarewicz 
34785aa125fSMichal Nazarewicz 	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
348bb13ffebSMel Gorman 		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
34985aa125fSMichal Nazarewicz 			break;
35085aa125fSMichal Nazarewicz 
35185aa125fSMichal Nazarewicz 		/*
35285aa125fSMichal Nazarewicz 		 * On subsequent iterations ALIGN() is actually not needed,
35385aa125fSMichal Nazarewicz 		 * but we keep it so as not to complicate the code.
35485aa125fSMichal Nazarewicz 		 */
35585aa125fSMichal Nazarewicz 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
35685aa125fSMichal Nazarewicz 		block_end_pfn = min(block_end_pfn, end_pfn);
35785aa125fSMichal Nazarewicz 
358bb13ffebSMel Gorman 		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
35985aa125fSMichal Nazarewicz 						   &freelist, true);
36085aa125fSMichal Nazarewicz 
36185aa125fSMichal Nazarewicz 		/*
36285aa125fSMichal Nazarewicz 		 * In strict mode, isolate_freepages_block() returns 0 if
36385aa125fSMichal Nazarewicz 		 * there are any holes in the block (ie. invalid PFNs or
36485aa125fSMichal Nazarewicz 		 * non-free pages).
36585aa125fSMichal Nazarewicz 		 */
36685aa125fSMichal Nazarewicz 		if (!isolated)
36785aa125fSMichal Nazarewicz 			break;
36885aa125fSMichal Nazarewicz 
36985aa125fSMichal Nazarewicz 		/*
37085aa125fSMichal Nazarewicz 		 * If we managed to isolate pages, it is always (1 << n) *
37185aa125fSMichal Nazarewicz 		 * pageblock_nr_pages for some non-negative n.  (Max order
37285aa125fSMichal Nazarewicz 		 * page may span two pageblocks).
37385aa125fSMichal Nazarewicz 		 */
37485aa125fSMichal Nazarewicz 	}
37585aa125fSMichal Nazarewicz 
37685aa125fSMichal Nazarewicz 	/* split_free_page does not map the pages */
37785aa125fSMichal Nazarewicz 	map_pages(&freelist);
37885aa125fSMichal Nazarewicz 
37985aa125fSMichal Nazarewicz 	if (pfn < end_pfn) {
38085aa125fSMichal Nazarewicz 		/* Loop terminated early, cleanup. */
38185aa125fSMichal Nazarewicz 		release_freepages(&freelist);
38285aa125fSMichal Nazarewicz 		return 0;
38385aa125fSMichal Nazarewicz 	}
38485aa125fSMichal Nazarewicz 
38585aa125fSMichal Nazarewicz 	/* We don't use freelists for anything. */
38685aa125fSMichal Nazarewicz 	return pfn;
38785aa125fSMichal Nazarewicz }
38885aa125fSMichal Nazarewicz 
389748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */
390c67fe375SMel Gorman static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
391748446bbSMel Gorman {
392748446bbSMel Gorman 	struct page *page;
393b9e84ac1SMinchan Kim 	unsigned int count[2] = { 0, };
394748446bbSMel Gorman 
395b9e84ac1SMinchan Kim 	list_for_each_entry(page, &cc->migratepages, lru)
396b9e84ac1SMinchan Kim 		count[!!page_is_file_cache(page)]++;
397748446bbSMel Gorman 
398c67fe375SMel Gorman 	/* If locked we can use the interrupt unsafe versions */
399c67fe375SMel Gorman 	if (locked) {
400b9e84ac1SMinchan Kim 		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
401b9e84ac1SMinchan Kim 		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
402c67fe375SMel Gorman 	} else {
403c67fe375SMel Gorman 		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
404c67fe375SMel Gorman 		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
405c67fe375SMel Gorman 	}
406748446bbSMel Gorman }
407748446bbSMel Gorman 
408748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */
409748446bbSMel Gorman static bool too_many_isolated(struct zone *zone)
410748446bbSMel Gorman {
411bc693045SMinchan Kim 	unsigned long active, inactive, isolated;
412748446bbSMel Gorman 
413748446bbSMel Gorman 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
414748446bbSMel Gorman 					zone_page_state(zone, NR_INACTIVE_ANON);
415bc693045SMinchan Kim 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
416bc693045SMinchan Kim 					zone_page_state(zone, NR_ACTIVE_ANON);
417748446bbSMel Gorman 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
418748446bbSMel Gorman 					zone_page_state(zone, NR_ISOLATED_ANON);
419748446bbSMel Gorman 
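	/* Back off when isolated pages exceed half of the zone's LRU pages */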
420bc693045SMinchan Kim 	return isolated > (inactive + active) / 2;
421748446bbSMel Gorman }
422748446bbSMel Gorman 
4232fe86e00SMichal Nazarewicz /**
4242fe86e00SMichal Nazarewicz  * isolate_migratepages_range() - isolate all migrate-able pages in range.
4252fe86e00SMichal Nazarewicz  * @zone:	Zone pages are in.
4262fe86e00SMichal Nazarewicz  * @cc:		Compaction control structure.
4272fe86e00SMichal Nazarewicz  * @low_pfn:	The first PFN of the range.
4282fe86e00SMichal Nazarewicz  * @end_pfn:	The one-past-the-last PFN of the range.
429e46a2879SMinchan Kim  * @unevictable: true if unevictable pages may be isolated
4302fe86e00SMichal Nazarewicz  *
4312fe86e00SMichal Nazarewicz  * Isolate all pages that can be migrated from the range specified by
4322fe86e00SMichal Nazarewicz  * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
4332fe86e00SMichal Nazarewicz  * pending, otherwise the PFN of the first page that was not scanned
4342fe86e00SMichal Nazarewicz  * (which may be less than, equal to or greater than end_pfn).
4352fe86e00SMichal Nazarewicz  *
4362fe86e00SMichal Nazarewicz  * Assumes that cc->migratepages is empty and cc->nr_migratepages is
4372fe86e00SMichal Nazarewicz  * zero.
4382fe86e00SMichal Nazarewicz  *
4392fe86e00SMichal Nazarewicz  * Apart from cc->migratepages and cc->nr_migratepages this function
4402fe86e00SMichal Nazarewicz  * does not modify any cc's fields, in particular it does not modify
4412fe86e00SMichal Nazarewicz  * (or read for that matter) cc->migrate_pfn.
442748446bbSMel Gorman  */
443ff9543fdSMichal Nazarewicz unsigned long
4442fe86e00SMichal Nazarewicz isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
445e46a2879SMinchan Kim 		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
446748446bbSMel Gorman {
4479927af74SMel Gorman 	unsigned long last_pageblock_nr = 0, pageblock_nr;
448b7aba698SMel Gorman 	unsigned long nr_scanned = 0, nr_isolated = 0;
449748446bbSMel Gorman 	struct list_head *migratelist = &cc->migratepages;
450f3fd4a61SKonstantin Khlebnikov 	isolate_mode_t mode = 0;
451fa9add64SHugh Dickins 	struct lruvec *lruvec;
452c67fe375SMel Gorman 	unsigned long flags;
4532a1402aaSMel Gorman 	bool locked = false;
454bb13ffebSMel Gorman 	struct page *page = NULL, *valid_page = NULL;
455748446bbSMel Gorman 
456748446bbSMel Gorman 	/*
457748446bbSMel Gorman 	 * Ensure that there are not too many pages isolated from the LRU
458748446bbSMel Gorman 	 * list by either parallel reclaimers or compaction. If there are,
459748446bbSMel Gorman 	 * delay for some time until fewer pages are isolated
460748446bbSMel Gorman 	 */
461748446bbSMel Gorman 	while (unlikely(too_many_isolated(zone))) {
462f9e35b3bSMel Gorman 		/* async migration should just abort */
46368e3e926SLinus Torvalds 		if (!cc->sync)
4642fe86e00SMichal Nazarewicz 			return 0;
465f9e35b3bSMel Gorman 
466748446bbSMel Gorman 		congestion_wait(BLK_RW_ASYNC, HZ/10);
467748446bbSMel Gorman 
468748446bbSMel Gorman 		if (fatal_signal_pending(current))
4692fe86e00SMichal Nazarewicz 			return 0;
470748446bbSMel Gorman 	}
471748446bbSMel Gorman 
472748446bbSMel Gorman 	/* Time to isolate some pages for migration */
473b2eef8c0SAndrea Arcangeli 	cond_resched();
474748446bbSMel Gorman 	for (; low_pfn < end_pfn; low_pfn++) {
475b2eef8c0SAndrea Arcangeli 		/* give a chance to irqs before checking need_resched() */
4762a1402aaSMel Gorman 		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
4772a1402aaSMel Gorman 			if (should_release_lock(&zone->lru_lock)) {
478c67fe375SMel Gorman 				spin_unlock_irqrestore(&zone->lru_lock, flags);
479b2eef8c0SAndrea Arcangeli 				locked = false;
480b2eef8c0SAndrea Arcangeli 			}
4812a1402aaSMel Gorman 		}
482b2eef8c0SAndrea Arcangeli 
4830bf380bcSMel Gorman 		/*
4840bf380bcSMel Gorman 		 * migrate_pfn does not necessarily start aligned to a
4850bf380bcSMel Gorman 		 * pageblock. Ensure that pfn_valid is called when moving
4860bf380bcSMel Gorman 		 * into a new MAX_ORDER_NR_PAGES range in case of large
4870bf380bcSMel Gorman 		 * memory holes within the zone
4880bf380bcSMel Gorman 		 */
4890bf380bcSMel Gorman 		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
4900bf380bcSMel Gorman 			if (!pfn_valid(low_pfn)) {
4910bf380bcSMel Gorman 				low_pfn += MAX_ORDER_NR_PAGES - 1;
4920bf380bcSMel Gorman 				continue;
4930bf380bcSMel Gorman 			}
4940bf380bcSMel Gorman 		}
4950bf380bcSMel Gorman 
496748446bbSMel Gorman 		if (!pfn_valid_within(low_pfn))
497748446bbSMel Gorman 			continue;
498b7aba698SMel Gorman 		nr_scanned++;
499748446bbSMel Gorman 
500dc908600SMel Gorman 		/*
501dc908600SMel Gorman 		 * Get the page and ensure the page is within the same zone.
502dc908600SMel Gorman 		 * See the comment in isolate_freepages about overlapping
503dc908600SMel Gorman 		 * nodes. It is deliberate that the new zone lock is not taken
504dc908600SMel Gorman 		 * as memory compaction should not move pages between nodes.
505dc908600SMel Gorman 		 */
506748446bbSMel Gorman 		page = pfn_to_page(low_pfn);
507dc908600SMel Gorman 		if (page_zone(page) != zone)
508dc908600SMel Gorman 			continue;
509dc908600SMel Gorman 
510bb13ffebSMel Gorman 		if (!valid_page)
511bb13ffebSMel Gorman 			valid_page = page;
512bb13ffebSMel Gorman 
513bb13ffebSMel Gorman 		/* If isolation recently failed, do not retry */
514bb13ffebSMel Gorman 		pageblock_nr = low_pfn >> pageblock_order;
515bb13ffebSMel Gorman 		if (!isolation_suitable(cc, page))
516bb13ffebSMel Gorman 			goto next_pageblock;
517bb13ffebSMel Gorman 
518dc908600SMel Gorman 		/* Skip if free */
519748446bbSMel Gorman 		if (PageBuddy(page))
520748446bbSMel Gorman 			continue;
521748446bbSMel Gorman 
5229927af74SMel Gorman 		/*
5239927af74SMel Gorman 		 * For async migration, only scan within MOVABLE (and CMA) blocks.
5249927af74SMel Gorman 		 * Async migration is optimistic: check whether the minimum amount
5259927af74SMel Gorman 		 * of work satisfies the allocation.
5269927af74SMel Gorman 		 */
52768e3e926SLinus Torvalds 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
52847118af0SMichal Nazarewicz 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
529c89511abSMel Gorman 			cc->finished_update_migrate = true;
5302a1402aaSMel Gorman 			goto next_pageblock;
5319927af74SMel Gorman 		}
5329927af74SMel Gorman 
533bf6bddf1SRafael Aquini 		/*
534bf6bddf1SRafael Aquini 		 * Check may be lockless but that's ok as we recheck later.
535bf6bddf1SRafael Aquini 		 * It's possible to migrate LRU pages and balloon pages;
536bf6bddf1SRafael Aquini 		 * Skip any other type of page
537bf6bddf1SRafael Aquini 		 */
538bf6bddf1SRafael Aquini 		if (!PageLRU(page)) {
539bf6bddf1SRafael Aquini 			if (unlikely(balloon_page_movable(page))) {
540bf6bddf1SRafael Aquini 				if (locked && balloon_page_isolate(page)) {
541bf6bddf1SRafael Aquini 					/* Successfully isolated */
542bf6bddf1SRafael Aquini 					cc->finished_update_migrate = true;
543bf6bddf1SRafael Aquini 					list_add(&page->lru, migratelist);
544bf6bddf1SRafael Aquini 					cc->nr_migratepages++;
545bf6bddf1SRafael Aquini 					nr_isolated++;
546bf6bddf1SRafael Aquini 					goto check_compact_cluster;
547bf6bddf1SRafael Aquini 				}
548bf6bddf1SRafael Aquini 			}
549bc835011SAndrea Arcangeli 			continue;
550bf6bddf1SRafael Aquini 		}
551bc835011SAndrea Arcangeli 
552bc835011SAndrea Arcangeli 		/*
5532a1402aaSMel Gorman 		 * PageLRU is set. lru_lock normally excludes isolation,
5542a1402aaSMel Gorman 		 * splitting and collapsing (collapsing has already happened
5552a1402aaSMel Gorman 		 * if PageLRU is set) but the lock is not necessarily taken
5562a1402aaSMel Gorman 		 * here and it is wasteful to take it just to check transhuge.
5572a1402aaSMel Gorman 		 * Check TransHuge without lock and skip the whole pageblock if
5582a1402aaSMel Gorman 		 * it's either a transhuge or hugetlbfs page, as calling
5592a1402aaSMel Gorman 		 * compound_order() without preventing THP from splitting the
5602a1402aaSMel Gorman 		 * page underneath us may return surprising results.
561bc835011SAndrea Arcangeli 		 */
562bc835011SAndrea Arcangeli 		if (PageTransHuge(page)) {
5632a1402aaSMel Gorman 			if (!locked)
5642a1402aaSMel Gorman 				goto next_pageblock;
5652a1402aaSMel Gorman 			low_pfn += (1 << compound_order(page)) - 1;
5662a1402aaSMel Gorman 			continue;
5672a1402aaSMel Gorman 		}
5682a1402aaSMel Gorman 
5692a1402aaSMel Gorman 		/* Check if it is ok to still hold the lock */
5702a1402aaSMel Gorman 		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
5712a1402aaSMel Gorman 								locked, cc);
5722a1402aaSMel Gorman 		if (!locked || fatal_signal_pending(current))
5732a1402aaSMel Gorman 			break;
5742a1402aaSMel Gorman 
5752a1402aaSMel Gorman 		/* Recheck PageLRU and PageTransHuge under lock */
5762a1402aaSMel Gorman 		if (!PageLRU(page))
5772a1402aaSMel Gorman 			continue;
5782a1402aaSMel Gorman 		if (PageTransHuge(page)) {
579bc835011SAndrea Arcangeli 			low_pfn += (1 << compound_order(page)) - 1;
580bc835011SAndrea Arcangeli 			continue;
581bc835011SAndrea Arcangeli 		}
582bc835011SAndrea Arcangeli 
58368e3e926SLinus Torvalds 		if (!cc->sync)
584c8244935SMel Gorman 			mode |= ISOLATE_ASYNC_MIGRATE;
585c8244935SMel Gorman 
586e46a2879SMinchan Kim 		if (unevictable)
587e46a2879SMinchan Kim 			mode |= ISOLATE_UNEVICTABLE;
588e46a2879SMinchan Kim 
589fa9add64SHugh Dickins 		lruvec = mem_cgroup_page_lruvec(page, zone);
590fa9add64SHugh Dickins 
591748446bbSMel Gorman 		/* Try isolate the page */
592f3fd4a61SKonstantin Khlebnikov 		if (__isolate_lru_page(page, mode) != 0)
593748446bbSMel Gorman 			continue;
594748446bbSMel Gorman 
595bc835011SAndrea Arcangeli 		VM_BUG_ON(PageTransCompound(page));
596bc835011SAndrea Arcangeli 
597748446bbSMel Gorman 		/* Successfully isolated */
598c89511abSMel Gorman 		cc->finished_update_migrate = true;
599fa9add64SHugh Dickins 		del_page_from_lru_list(page, lruvec, page_lru(page));
600748446bbSMel Gorman 		list_add(&page->lru, migratelist);
601748446bbSMel Gorman 		cc->nr_migratepages++;
602b7aba698SMel Gorman 		nr_isolated++;
603748446bbSMel Gorman 
604bf6bddf1SRafael Aquini check_compact_cluster:
605748446bbSMel Gorman 		/* Avoid isolating too much */
60631b8384aSHillf Danton 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
60731b8384aSHillf Danton 			++low_pfn;
608748446bbSMel Gorman 			break;
609748446bbSMel Gorman 		}
6102a1402aaSMel Gorman 
6112a1402aaSMel Gorman 		continue;
6122a1402aaSMel Gorman 
6132a1402aaSMel Gorman next_pageblock:
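		/* Nothing to isolate here; advance the scanner past this pageblock */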
6142a1402aaSMel Gorman 		low_pfn += pageblock_nr_pages;
6152a1402aaSMel Gorman 		low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
6162a1402aaSMel Gorman 		last_pageblock_nr = pageblock_nr;
61731b8384aSHillf Danton 	}
618748446bbSMel Gorman 
619c67fe375SMel Gorman 	acct_isolated(zone, locked, cc);
620748446bbSMel Gorman 
621c67fe375SMel Gorman 	if (locked)
622c67fe375SMel Gorman 		spin_unlock_irqrestore(&zone->lru_lock, flags);
623748446bbSMel Gorman 
624bb13ffebSMel Gorman 	/* Update the pageblock-skip if the whole pageblock was scanned */
625bb13ffebSMel Gorman 	if (low_pfn == end_pfn)
626c89511abSMel Gorman 		update_pageblock_skip(cc, valid_page, nr_isolated, true);
627bb13ffebSMel Gorman 
628b7aba698SMel Gorman 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
629b7aba698SMel Gorman 
630*010fc29aSMinchan Kim 	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
631397487dbSMel Gorman 	if (nr_isolated)
632*010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, nr_isolated);
633397487dbSMel Gorman 
6342fe86e00SMichal Nazarewicz 	return low_pfn;
6352fe86e00SMichal Nazarewicz }
6362fe86e00SMichal Nazarewicz 
637ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */
638ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION
639ff9543fdSMichal Nazarewicz /*
640ff9543fdSMichal Nazarewicz  * Based on information in the current compact_control, find blocks
641ff9543fdSMichal Nazarewicz  * suitable for isolating free pages from and then isolate them.
642ff9543fdSMichal Nazarewicz  */
643ff9543fdSMichal Nazarewicz static void isolate_freepages(struct zone *zone,
644ff9543fdSMichal Nazarewicz 				struct compact_control *cc)
645ff9543fdSMichal Nazarewicz {
646ff9543fdSMichal Nazarewicz 	struct page *page;
647ff9543fdSMichal Nazarewicz 	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
648ff9543fdSMichal Nazarewicz 	int nr_freepages = cc->nr_freepages;
649ff9543fdSMichal Nazarewicz 	struct list_head *freelist = &cc->freepages;
6502fe86e00SMichal Nazarewicz 
651ff9543fdSMichal Nazarewicz 	/*
652ff9543fdSMichal Nazarewicz 	 * Initialise the free scanner. The starting point is where we last
653ff9543fdSMichal Nazarewicz 	 * scanned from (or the end of the zone if starting). The low point
654ff9543fdSMichal Nazarewicz 	 * is the end of the pageblock the migration scanner is using.
655ff9543fdSMichal Nazarewicz 	 */
656ff9543fdSMichal Nazarewicz 	pfn = cc->free_pfn;
657ff9543fdSMichal Nazarewicz 	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
6582fe86e00SMichal Nazarewicz 
659ff9543fdSMichal Nazarewicz 	/*
660ff9543fdSMichal Nazarewicz 	 * Take care that if the migration scanner is at the end of the zone
661ff9543fdSMichal Nazarewicz 	 * that the free scanner does not accidentally move to the next zone
662ff9543fdSMichal Nazarewicz 	 * in the next isolation cycle.
663ff9543fdSMichal Nazarewicz 	 */
664ff9543fdSMichal Nazarewicz 	high_pfn = min(low_pfn, pfn);
665ff9543fdSMichal Nazarewicz 
666ff9543fdSMichal Nazarewicz 	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
667ff9543fdSMichal Nazarewicz 
668ff9543fdSMichal Nazarewicz 	/*
669ff9543fdSMichal Nazarewicz 	 * Isolate free pages until enough are available to migrate the
670ff9543fdSMichal Nazarewicz 	 * pages on cc->migratepages. We stop searching if the migrate
671ff9543fdSMichal Nazarewicz 	 * and free page scanners meet or enough free pages are isolated.
672ff9543fdSMichal Nazarewicz 	 */
673ff9543fdSMichal Nazarewicz 	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
674ff9543fdSMichal Nazarewicz 					pfn -= pageblock_nr_pages) {
675ff9543fdSMichal Nazarewicz 		unsigned long isolated;
676ff9543fdSMichal Nazarewicz 
677ff9543fdSMichal Nazarewicz 		if (!pfn_valid(pfn))
678ff9543fdSMichal Nazarewicz 			continue;
679ff9543fdSMichal Nazarewicz 
680ff9543fdSMichal Nazarewicz 		/*
681ff9543fdSMichal Nazarewicz 		 * Check for overlapping nodes/zones. It's possible on some
682ff9543fdSMichal Nazarewicz 		 * configurations to have a setup like
683ff9543fdSMichal Nazarewicz 		 * node0 node1 node0
684ff9543fdSMichal Nazarewicz 		 * i.e. it's possible that all pages within a zone's range of
685ff9543fdSMichal Nazarewicz 		 * pages do not belong to a single zone.
686ff9543fdSMichal Nazarewicz 		 */
687ff9543fdSMichal Nazarewicz 		page = pfn_to_page(pfn);
688ff9543fdSMichal Nazarewicz 		if (page_zone(page) != zone)
689ff9543fdSMichal Nazarewicz 			continue;
690ff9543fdSMichal Nazarewicz 
691ff9543fdSMichal Nazarewicz 		/* Check the block is suitable for migration */
69268e3e926SLinus Torvalds 		if (!suitable_migration_target(page))
693ff9543fdSMichal Nazarewicz 			continue;
69468e3e926SLinus Torvalds 
695bb13ffebSMel Gorman 		/* If isolation recently failed, do not retry */
696bb13ffebSMel Gorman 		if (!isolation_suitable(cc, page))
697bb13ffebSMel Gorman 			continue;
698bb13ffebSMel Gorman 
699f40d1e42SMel Gorman 		/* Found a block suitable for isolating free pages from */
700ff9543fdSMichal Nazarewicz 		isolated = 0;
70160177d31SMel Gorman 
70260177d31SMel Gorman 		/*
70360177d31SMel Gorman 		 * As pfn may not start aligned, pfn+pageblock_nr_pages
70460177d31SMel Gorman 		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
70560177d31SMel Gorman 		 * a pfn_valid check. Ensure isolate_freepages_block()
70660177d31SMel Gorman 		 * only scans within a pageblock
70760177d31SMel Gorman 		 */
70860177d31SMel Gorman 		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
70960177d31SMel Gorman 		end_pfn = min(end_pfn, zone_end_pfn);
710f40d1e42SMel Gorman 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
711ff9543fdSMichal Nazarewicz 						   freelist, false);
712ff9543fdSMichal Nazarewicz 		nr_freepages += isolated;
713ff9543fdSMichal Nazarewicz 
714ff9543fdSMichal Nazarewicz 		/*
715ff9543fdSMichal Nazarewicz 		 * Record the highest PFN we isolated pages from. When next
716ff9543fdSMichal Nazarewicz 		 * looking for free pages, the search will restart here as
717ff9543fdSMichal Nazarewicz 		 * page migration may have returned some pages to the allocator
718ff9543fdSMichal Nazarewicz 		 */
719c89511abSMel Gorman 		if (isolated) {
720c89511abSMel Gorman 			cc->finished_update_free = true;
721ff9543fdSMichal Nazarewicz 			high_pfn = max(high_pfn, pfn);
722ff9543fdSMichal Nazarewicz 		}
723c89511abSMel Gorman 	}
724ff9543fdSMichal Nazarewicz 
725ff9543fdSMichal Nazarewicz 	/* split_free_page does not map the pages */
726ff9543fdSMichal Nazarewicz 	map_pages(freelist);
727ff9543fdSMichal Nazarewicz 
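	/* Record where the free scanner should restart and how many pages are isolated */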
728ff9543fdSMichal Nazarewicz 	cc->free_pfn = high_pfn;
729ff9543fdSMichal Nazarewicz 	cc->nr_freepages = nr_freepages;
730748446bbSMel Gorman }
731748446bbSMel Gorman 
732748446bbSMel Gorman /*
733748446bbSMel Gorman  * This is a migrate-callback that "allocates" freepages by taking pages
734748446bbSMel Gorman  * from the isolated freelists in the block we are migrating to.
735748446bbSMel Gorman  */
736748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage,
737748446bbSMel Gorman 					unsigned long data,
738748446bbSMel Gorman 					int **result)
739748446bbSMel Gorman {
740748446bbSMel Gorman 	struct compact_control *cc = (struct compact_control *)data;
741748446bbSMel Gorman 	struct page *freepage;
742748446bbSMel Gorman 
743748446bbSMel Gorman 	/* Isolate free pages if necessary */
744748446bbSMel Gorman 	if (list_empty(&cc->freepages)) {
745748446bbSMel Gorman 		isolate_freepages(cc->zone, cc);
746748446bbSMel Gorman 
747748446bbSMel Gorman 		if (list_empty(&cc->freepages))
748748446bbSMel Gorman 			return NULL;
749748446bbSMel Gorman 	}
750748446bbSMel Gorman 
751748446bbSMel Gorman 	freepage = list_entry(cc->freepages.next, struct page, lru);
752748446bbSMel Gorman 	list_del(&freepage->lru);
753748446bbSMel Gorman 	cc->nr_freepages--;
754748446bbSMel Gorman 
755748446bbSMel Gorman 	return freepage;
756748446bbSMel Gorman }
757748446bbSMel Gorman 
758748446bbSMel Gorman /*
759748446bbSMel Gorman  * We cannot control nr_migratepages and nr_freepages fully when migration is
760748446bbSMel Gorman  * running as migrate_pages() has no knowledge of compact_control. When
761748446bbSMel Gorman  * migration is complete, we count the number of pages on the lists by hand.
762748446bbSMel Gorman  */
763748446bbSMel Gorman static void update_nr_listpages(struct compact_control *cc)
764748446bbSMel Gorman {
765748446bbSMel Gorman 	int nr_migratepages = 0;
766748446bbSMel Gorman 	int nr_freepages = 0;
767748446bbSMel Gorman 	struct page *page;
768748446bbSMel Gorman 
769748446bbSMel Gorman 	list_for_each_entry(page, &cc->migratepages, lru)
770748446bbSMel Gorman 		nr_migratepages++;
771748446bbSMel Gorman 	list_for_each_entry(page, &cc->freepages, lru)
772748446bbSMel Gorman 		nr_freepages++;
773748446bbSMel Gorman 
774748446bbSMel Gorman 	cc->nr_migratepages = nr_migratepages;
775748446bbSMel Gorman 	cc->nr_freepages = nr_freepages;
776748446bbSMel Gorman }
777748446bbSMel Gorman 
778ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */
779ff9543fdSMichal Nazarewicz typedef enum {
780ff9543fdSMichal Nazarewicz 	ISOLATE_ABORT,		/* Abort compaction now */
781ff9543fdSMichal Nazarewicz 	ISOLATE_NONE,		/* No pages isolated, continue scanning */
782ff9543fdSMichal Nazarewicz 	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
783ff9543fdSMichal Nazarewicz } isolate_migrate_t;
784ff9543fdSMichal Nazarewicz 
785ff9543fdSMichal Nazarewicz /*
786ff9543fdSMichal Nazarewicz  * Isolate all pages that can be migrated from the block pointed to by
787ff9543fdSMichal Nazarewicz  * the migrate scanner within compact_control.
788ff9543fdSMichal Nazarewicz  */
789ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone,
790ff9543fdSMichal Nazarewicz 					struct compact_control *cc)
791ff9543fdSMichal Nazarewicz {
792ff9543fdSMichal Nazarewicz 	unsigned long low_pfn, end_pfn;
793ff9543fdSMichal Nazarewicz 
794ff9543fdSMichal Nazarewicz 	/* Do not scan outside zone boundaries */
795ff9543fdSMichal Nazarewicz 	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
796ff9543fdSMichal Nazarewicz 
797ff9543fdSMichal Nazarewicz 	/* Only scan within a pageblock boundary */
798ff9543fdSMichal Nazarewicz 	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
799ff9543fdSMichal Nazarewicz 
800ff9543fdSMichal Nazarewicz 	/* Do not cross the free scanner or scan within a memory hole */
801ff9543fdSMichal Nazarewicz 	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
802ff9543fdSMichal Nazarewicz 		cc->migrate_pfn = end_pfn;
803ff9543fdSMichal Nazarewicz 		return ISOLATE_NONE;
804ff9543fdSMichal Nazarewicz 	}
805ff9543fdSMichal Nazarewicz 
806ff9543fdSMichal Nazarewicz 	/* Perform the isolation */
807e46a2879SMinchan Kim 	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
808e64c5237SShaohua Li 	if (!low_pfn || cc->contended)
809ff9543fdSMichal Nazarewicz 		return ISOLATE_ABORT;
810ff9543fdSMichal Nazarewicz 
811ff9543fdSMichal Nazarewicz 	cc->migrate_pfn = low_pfn;
812ff9543fdSMichal Nazarewicz 
813ff9543fdSMichal Nazarewicz 	return ISOLATE_SUCCESS;
814ff9543fdSMichal Nazarewicz }
815ff9543fdSMichal Nazarewicz 
816748446bbSMel Gorman static int compact_finished(struct zone *zone,
817748446bbSMel Gorman 			    struct compact_control *cc)
818748446bbSMel Gorman {
8195a03b051SAndrea Arcangeli 	unsigned long watermark;
82056de7263SMel Gorman 
821748446bbSMel Gorman 	if (fatal_signal_pending(current))
822748446bbSMel Gorman 		return COMPACT_PARTIAL;
823748446bbSMel Gorman 
824753341a4SMel Gorman 	/* Compaction run completes if the migrate and free scanner meet */
825bb13ffebSMel Gorman 	if (cc->free_pfn <= cc->migrate_pfn) {
82662997027SMel Gorman 		/*
82762997027SMel Gorman 		 * Mark that the PG_migrate_skip information should be cleared
82862997027SMel Gorman 		 * by kswapd when it goes to sleep. kswapd does not set the
82962997027SMel Gorman 		 * flag itself, as the decision to clear it should be directly
83062997027SMel Gorman 		 * based on an allocation request.
83162997027SMel Gorman 		 */
83262997027SMel Gorman 		if (!current_is_kswapd())
83362997027SMel Gorman 			zone->compact_blockskip_flush = true;
83462997027SMel Gorman 
835748446bbSMel Gorman 		return COMPACT_COMPLETE;
836bb13ffebSMel Gorman 	}
837748446bbSMel Gorman 
83882478fb7SJohannes Weiner 	/*
83982478fb7SJohannes Weiner 	 * order == -1 is expected when compacting via
84082478fb7SJohannes Weiner 	 * /proc/sys/vm/compact_memory
84182478fb7SJohannes Weiner 	 */
84256de7263SMel Gorman 	if (cc->order == -1)
84356de7263SMel Gorman 		return COMPACT_CONTINUE;
84456de7263SMel Gorman 
8453957c776SMichal Hocko 	/* Compaction run is not finished if the watermark is not met */
8463957c776SMichal Hocko 	watermark = low_wmark_pages(zone);
8473957c776SMichal Hocko 	watermark += (1 << cc->order);
8483957c776SMichal Hocko 
8493957c776SMichal Hocko 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
8503957c776SMichal Hocko 		return COMPACT_CONTINUE;
8513957c776SMichal Hocko 
85256de7263SMel Gorman 	/* Direct compactor: Is a suitable page free? */
8531fb3f8caSMel Gorman 	if (cc->page) {
8541fb3f8caSMel Gorman 		/* Was a suitable page captured? */
8551fb3f8caSMel Gorman 		if (*cc->page)
8561fb3f8caSMel Gorman 			return COMPACT_PARTIAL;
8571fb3f8caSMel Gorman 	} else {
8581fb3f8caSMel Gorman 		unsigned int order;
85956de7263SMel Gorman 		for (order = cc->order; order < MAX_ORDER; order++) {
8601fb3f8caSMel Gorman 			struct free_area *area = &zone->free_area[order];
86156de7263SMel Gorman 			/* Job done if page is free of the right migratetype */
8621fb3f8caSMel Gorman 			if (!list_empty(&area->free_list[cc->migratetype]))
86356de7263SMel Gorman 				return COMPACT_PARTIAL;
86456de7263SMel Gorman 
86556de7263SMel Gorman 			/* Job done if allocation would set block type */
8661fb3f8caSMel Gorman 			if (order >= pageblock_order && area->nr_free)
86756de7263SMel Gorman 				return COMPACT_PARTIAL;
86856de7263SMel Gorman 		}
8691fb3f8caSMel Gorman 	}
87056de7263SMel Gorman 
871748446bbSMel Gorman 	return COMPACT_CONTINUE;
872748446bbSMel Gorman }
873748446bbSMel Gorman 
8743e7d3449SMel Gorman /*
8753e7d3449SMel Gorman  * compaction_suitable: Is this suitable to run compaction on this zone now?
8763e7d3449SMel Gorman  * Returns
8773e7d3449SMel Gorman  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
8783e7d3449SMel Gorman  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
8793e7d3449SMel Gorman  *   COMPACT_CONTINUE - If compaction should run now
8803e7d3449SMel Gorman  */
8813e7d3449SMel Gorman unsigned long compaction_suitable(struct zone *zone, int order)
8823e7d3449SMel Gorman {
8833e7d3449SMel Gorman 	int fragindex;
8843e7d3449SMel Gorman 	unsigned long watermark;
8853e7d3449SMel Gorman 
8863e7d3449SMel Gorman 	/*
8873957c776SMichal Hocko 	 * order == -1 is expected when compacting via
8883957c776SMichal Hocko 	 * /proc/sys/vm/compact_memory
8893957c776SMichal Hocko 	 */
8903957c776SMichal Hocko 	if (order == -1)
8913957c776SMichal Hocko 		return COMPACT_CONTINUE;
8923957c776SMichal Hocko 
8933957c776SMichal Hocko 	/*
8943e7d3449SMel Gorman 	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
8953e7d3449SMel Gorman 	 * This is because during migration, copies of pages need to be
8963e7d3449SMel Gorman 	 * allocated and for a short time, the footprint is higher
8973e7d3449SMel Gorman 	 * allocated and, for a short time, the footprint is higher.
8983e7d3449SMel Gorman 	watermark = low_wmark_pages(zone) + (2UL << order);
8993e7d3449SMel Gorman 	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
9003e7d3449SMel Gorman 		return COMPACT_SKIPPED;
9013e7d3449SMel Gorman 
9023e7d3449SMel Gorman 	/*
9033e7d3449SMel Gorman 	 * fragmentation index determines if allocation failures are due to
9043e7d3449SMel Gorman 	 * low memory or external fragmentation
9053e7d3449SMel Gorman 	 *
906a582a738SShaohua Li 	 * index of -1000 implies allocations might succeed depending on
907a582a738SShaohua Li 	 * watermarks
9083e7d3449SMel Gorman 	 * index towards 0 implies failure is due to lack of memory
9093e7d3449SMel Gorman 	 * index towards 1000 implies failure is due to fragmentation
9103e7d3449SMel Gorman 	 *
9113e7d3449SMel Gorman 	 * Only compact if a failure would be due to fragmentation.
9123e7d3449SMel Gorman 	 */
9133e7d3449SMel Gorman 	fragindex = fragmentation_index(zone, order);
9143e7d3449SMel Gorman 	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
9153e7d3449SMel Gorman 		return COMPACT_SKIPPED;
9163e7d3449SMel Gorman 
917a582a738SShaohua Li 	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
918a582a738SShaohua Li 	    0, 0))
9193e7d3449SMel Gorman 		return COMPACT_PARTIAL;
9203e7d3449SMel Gorman 
9213e7d3449SMel Gorman 	return COMPACT_CONTINUE;
9223e7d3449SMel Gorman }
9233e7d3449SMel Gorman 
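/*
 * If a direct compactor requested page capture (cc->page), check the free
 * lists for a suitable high-order page and capture it right away.
 */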
924c8bf2d8bSThierry Reding static void compact_capture_page(struct compact_control *cc)
925c8bf2d8bSThierry Reding {
926c8bf2d8bSThierry Reding 	unsigned long flags;
927c8bf2d8bSThierry Reding 	int mtype, mtype_low, mtype_high;
928c8bf2d8bSThierry Reding 
929c8bf2d8bSThierry Reding 	if (!cc->page || *cc->page)
930c8bf2d8bSThierry Reding 		return;
931c8bf2d8bSThierry Reding 
932c8bf2d8bSThierry Reding 	/*
933c8bf2d8bSThierry Reding 	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
934c8bf2d8bSThierry Reding 	 * regardless of the migratetype of the freelist it is captured from.
935c8bf2d8bSThierry Reding 	 * This is fine because the order for a high-order MIGRATE_MOVABLE
936c8bf2d8bSThierry Reding 	 * allocation is typically at least a pageblock size and overall
937c8bf2d8bSThierry Reding 	 * fragmentation is not impaired. Other allocation types must
938c8bf2d8bSThierry Reding 	 * capture pages from their own migratelist because otherwise they
939c8bf2d8bSThierry Reding 	 * could pollute other pageblocks like MIGRATE_MOVABLE with
940c8bf2d8bSThierry Reding 	 * difficult-to-move pages, making fragmentation worse overall.
941c8bf2d8bSThierry Reding 	 */
942c8bf2d8bSThierry Reding 	if (cc->migratetype == MIGRATE_MOVABLE) {
943c8bf2d8bSThierry Reding 		mtype_low = 0;
944c8bf2d8bSThierry Reding 		mtype_high = MIGRATE_PCPTYPES;
945c8bf2d8bSThierry Reding 	} else {
946c8bf2d8bSThierry Reding 		mtype_low = cc->migratetype;
947c8bf2d8bSThierry Reding 		mtype_high = cc->migratetype + 1;
948c8bf2d8bSThierry Reding 	}
949c8bf2d8bSThierry Reding 
950c8bf2d8bSThierry Reding 	/* Speculatively examine the free lists without zone lock */
951c8bf2d8bSThierry Reding 	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
952c8bf2d8bSThierry Reding 		int order;
953c8bf2d8bSThierry Reding 		for (order = cc->order; order < MAX_ORDER; order++) {
954c8bf2d8bSThierry Reding 			struct page *page;
955c8bf2d8bSThierry Reding 			struct free_area *area;
956c8bf2d8bSThierry Reding 			area = &(cc->zone->free_area[order]);
957c8bf2d8bSThierry Reding 			if (list_empty(&area->free_list[mtype]))
958c8bf2d8bSThierry Reding 				continue;
959c8bf2d8bSThierry Reding 
960c8bf2d8bSThierry Reding 			/* Take the lock and attempt capture of the page */
961c8bf2d8bSThierry Reding 			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
962c8bf2d8bSThierry Reding 				return;
963c8bf2d8bSThierry Reding 			if (!list_empty(&area->free_list[mtype])) {
964c8bf2d8bSThierry Reding 				page = list_entry(area->free_list[mtype].next,
965c8bf2d8bSThierry Reding 							struct page, lru);
966c8bf2d8bSThierry Reding 				if (capture_free_page(page, cc->order, mtype)) {
967c8bf2d8bSThierry Reding 					spin_unlock_irqrestore(&cc->zone->lock,
968c8bf2d8bSThierry Reding 									flags);
969c8bf2d8bSThierry Reding 					*cc->page = page;
970c8bf2d8bSThierry Reding 					return;
971c8bf2d8bSThierry Reding 				}
972c8bf2d8bSThierry Reding 			}
973c8bf2d8bSThierry Reding 			spin_unlock_irqrestore(&cc->zone->lock, flags);
974c8bf2d8bSThierry Reding 		}
975c8bf2d8bSThierry Reding 	}
976c8bf2d8bSThierry Reding }
977c8bf2d8bSThierry Reding 
978748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc)
979748446bbSMel Gorman {
980748446bbSMel Gorman 	int ret;
981c89511abSMel Gorman 	unsigned long start_pfn = zone->zone_start_pfn;
982c89511abSMel Gorman 	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
983748446bbSMel Gorman 
9843e7d3449SMel Gorman 	ret = compaction_suitable(zone, cc->order);
9853e7d3449SMel Gorman 	switch (ret) {
9863e7d3449SMel Gorman 	case COMPACT_PARTIAL:
9873e7d3449SMel Gorman 	case COMPACT_SKIPPED:
9883e7d3449SMel Gorman 		/* Compaction is likely to fail */
9893e7d3449SMel Gorman 		return ret;
9903e7d3449SMel Gorman 	case COMPACT_CONTINUE:
9913e7d3449SMel Gorman 		/* Fall through to compaction */
9923e7d3449SMel Gorman 		;
9933e7d3449SMel Gorman 	}
9943e7d3449SMel Gorman 
995c89511abSMel Gorman 	/*
996c89511abSMel Gorman 	 * Set up to move all movable pages to the end of the zone. Use cached
997c89511abSMel Gorman 	 * information on where the scanners should start but check that it
998c89511abSMel Gorman 	 * is initialised by ensuring the values are within zone boundaries.
999c89511abSMel Gorman 	 */
1000c89511abSMel Gorman 	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
1001c89511abSMel Gorman 	cc->free_pfn = zone->compact_cached_free_pfn;
1002c89511abSMel Gorman 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
1003c89511abSMel Gorman 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
1004c89511abSMel Gorman 		zone->compact_cached_free_pfn = cc->free_pfn;
1005c89511abSMel Gorman 	}
1006c89511abSMel Gorman 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
1007c89511abSMel Gorman 		cc->migrate_pfn = start_pfn;
1008c89511abSMel Gorman 		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
1009c89511abSMel Gorman 	}
1010748446bbSMel Gorman 
101162997027SMel Gorman 	/*
101262997027SMel Gorman 	 * Clear pageblock skip if there were failures recently and compaction
101362997027SMel Gorman 	 * is about to be retried after being deferred. kswapd does not do
101462997027SMel Gorman 	 * this reset as it'll reset the cached information when going to sleep.
101562997027SMel Gorman 	 */
101662997027SMel Gorman 	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
101762997027SMel Gorman 		__reset_isolation_suitable(zone);
1018bb13ffebSMel Gorman 
1019748446bbSMel Gorman 	migrate_prep_local();
1020748446bbSMel Gorman 
1021748446bbSMel Gorman 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
1022748446bbSMel Gorman 		unsigned long nr_migrate, nr_remaining;
10239d502c1cSMinchan Kim 		int err;
1024748446bbSMel Gorman 
1025f9e35b3bSMel Gorman 		switch (isolate_migratepages(zone, cc)) {
1026f9e35b3bSMel Gorman 		case ISOLATE_ABORT:
1027f9e35b3bSMel Gorman 			ret = COMPACT_PARTIAL;
10285733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
1029e64c5237SShaohua Li 			cc->nr_migratepages = 0;
1030f9e35b3bSMel Gorman 			goto out;
1031f9e35b3bSMel Gorman 		case ISOLATE_NONE:
1032748446bbSMel Gorman 			continue;
1033f9e35b3bSMel Gorman 		case ISOLATE_SUCCESS:
1034f9e35b3bSMel Gorman 			;
1035f9e35b3bSMel Gorman 		}
1036748446bbSMel Gorman 
1037748446bbSMel Gorman 		nr_migrate = cc->nr_migratepages;
10389d502c1cSMinchan Kim 		err = migrate_pages(&cc->migratepages, compaction_alloc,
103968e3e926SLinus Torvalds 				(unsigned long)cc, false,
10407b2a2d4aSMel Gorman 				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
10417b2a2d4aSMel Gorman 				MR_COMPACTION);
1042748446bbSMel Gorman 		update_nr_listpages(cc);
1043748446bbSMel Gorman 		nr_remaining = cc->nr_migratepages;
1044748446bbSMel Gorman 
1045b7aba698SMel Gorman 		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
1046b7aba698SMel Gorman 						nr_remaining);
1047748446bbSMel Gorman 
10485733c7d1SRafael Aquini 		/* Release isolated pages not migrated */
10499d502c1cSMinchan Kim 		if (err) {
10505733c7d1SRafael Aquini 			putback_movable_pages(&cc->migratepages);
1051748446bbSMel Gorman 			cc->nr_migratepages = 0;
10524bf2bba3SDavid Rientjes 			if (err == -ENOMEM) {
10534bf2bba3SDavid Rientjes 				ret = COMPACT_PARTIAL;
10544bf2bba3SDavid Rientjes 				goto out;
1055748446bbSMel Gorman 			}
10564bf2bba3SDavid Rientjes 		}
10571fb3f8caSMel Gorman 
10581fb3f8caSMel Gorman 		/* Capture a page now if it is a suitable size */
10591fb3f8caSMel Gorman 		compact_capture_page(cc);
1060748446bbSMel Gorman 	}
1061748446bbSMel Gorman 
1062f9e35b3bSMel Gorman out:
1063748446bbSMel Gorman 	/* Release free pages and check accounting */
1064748446bbSMel Gorman 	cc->nr_freepages -= release_freepages(&cc->freepages);
1065748446bbSMel Gorman 	VM_BUG_ON(cc->nr_freepages != 0);
1066748446bbSMel Gorman 
1067748446bbSMel Gorman 	return ret;
1068748446bbSMel Gorman }
106976ab0f53SMel Gorman 
1070d43a87e6SKyungmin Park static unsigned long compact_zone_order(struct zone *zone,
107177f1fe6bSMel Gorman 				 int order, gfp_t gfp_mask,
10721fb3f8caSMel Gorman 				 bool sync, bool *contended,
10731fb3f8caSMel Gorman 				 struct page **page)
107456de7263SMel Gorman {
1075e64c5237SShaohua Li 	unsigned long ret;
107656de7263SMel Gorman 	struct compact_control cc = {
107756de7263SMel Gorman 		.nr_freepages = 0,
107856de7263SMel Gorman 		.nr_migratepages = 0,
107956de7263SMel Gorman 		.order = order,
108056de7263SMel Gorman 		.migratetype = allocflags_to_migratetype(gfp_mask),
108156de7263SMel Gorman 		.zone = zone,
108268e3e926SLinus Torvalds 		.sync = sync,
10831fb3f8caSMel Gorman 		.page = page,
108456de7263SMel Gorman 	};
108556de7263SMel Gorman 	INIT_LIST_HEAD(&cc.freepages);
108656de7263SMel Gorman 	INIT_LIST_HEAD(&cc.migratepages);
108756de7263SMel Gorman 
1088e64c5237SShaohua Li 	ret = compact_zone(zone, &cc);
1089e64c5237SShaohua Li 
1090e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.freepages));
1091e64c5237SShaohua Li 	VM_BUG_ON(!list_empty(&cc.migratepages));
1092e64c5237SShaohua Li 
1093e64c5237SShaohua Li 	*contended = cc.contended;
1094e64c5237SShaohua Li 	return ret;
109556de7263SMel Gorman }
109656de7263SMel Gorman 
10975e771905SMel Gorman int sysctl_extfrag_threshold = 500;
10985e771905SMel Gorman 
109956de7263SMel Gorman /**
110056de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
110156de7263SMel Gorman  * @zonelist: The zonelist used for the current allocation
110256de7263SMel Gorman  * @order: The order of the current allocation
110356de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
110456de7263SMel Gorman  * @nodemask: The allowed nodes to allocate from
110577f1fe6bSMel Gorman  * @sync: Whether migration is synchronous or not
1106661c4cb9SMel Gorman  * @contended: Return value that is true if compaction was aborted due to lock contention
1107661c4cb9SMel Gorman  * @page: Optionally capture a free page of the requested order during compaction
110856de7263SMel Gorman  *
110956de7263SMel Gorman  * This is the main entry point for direct page compaction.
111056de7263SMel Gorman  */
111156de7263SMel Gorman unsigned long try_to_compact_pages(struct zonelist *zonelist,
111277f1fe6bSMel Gorman 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
11131fb3f8caSMel Gorman 			bool sync, bool *contended, struct page **page)
111456de7263SMel Gorman {
111556de7263SMel Gorman 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
111656de7263SMel Gorman 	int may_enter_fs = gfp_mask & __GFP_FS;
111756de7263SMel Gorman 	int may_perform_io = gfp_mask & __GFP_IO;
111856de7263SMel Gorman 	struct zoneref *z;
111956de7263SMel Gorman 	struct zone *zone;
112056de7263SMel Gorman 	int rc = COMPACT_SKIPPED;
1121d95ea5d1SBartlomiej Zolnierkiewicz 	int alloc_flags = 0;
112256de7263SMel Gorman 
11234ffb6335SMel Gorman 	/* Check if the GFP flags allow compaction */
1124c5a73c3dSAndrea Arcangeli 	if (!order || !may_enter_fs || !may_perform_io)
112556de7263SMel Gorman 		return rc;
112656de7263SMel Gorman 
1127*010fc29aSMinchan Kim 	count_compact_event(COMPACTSTALL);
112856de7263SMel Gorman 
1129d95ea5d1SBartlomiej Zolnierkiewicz #ifdef CONFIG_CMA
1130d95ea5d1SBartlomiej Zolnierkiewicz 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
1131d95ea5d1SBartlomiej Zolnierkiewicz 		alloc_flags |= ALLOC_CMA;
1132d95ea5d1SBartlomiej Zolnierkiewicz #endif
113356de7263SMel Gorman 	/* Compact each zone in the list */
113456de7263SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
113556de7263SMel Gorman 								nodemask) {
113656de7263SMel Gorman 		int status;
113756de7263SMel Gorman 
1138c67fe375SMel Gorman 		status = compact_zone_order(zone, order, gfp_mask, sync,
11391fb3f8caSMel Gorman 						contended, page);
114056de7263SMel Gorman 		rc = max(status, rc);
114156de7263SMel Gorman 
11423e7d3449SMel Gorman 		/* If a normal allocation would succeed, stop compacting */
1143d95ea5d1SBartlomiej Zolnierkiewicz 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
1144d95ea5d1SBartlomiej Zolnierkiewicz 				      alloc_flags))
114556de7263SMel Gorman 			break;
114656de7263SMel Gorman 	}
114756de7263SMel Gorman 
114856de7263SMel Gorman 	return rc;
114956de7263SMel Gorman }
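
/*
 * Illustrative only: the allocator slow path drives the function above from
 * __alloc_pages_direct_compact() in mm/page_alloc.c roughly as sketched below;
 * the local variable names are approximations, not the exact source:
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
 *				  sync_migration, &contended_compaction,
 *				  &captured_page);
 *	if (captured_page)
 *		use the captured page directly;
 *	else if (rc >= COMPACT_PARTIAL)
 *		retry the watermark check before falling back to reclaim;
 */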
115056de7263SMel Gorman 
115156de7263SMel Gorman 
115276ab0f53SMel Gorman /* Compact all zones within a node */
11537be62de9SRik van Riel static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
115476ab0f53SMel Gorman {
115576ab0f53SMel Gorman 	int zoneid;
115676ab0f53SMel Gorman 	struct zone *zone;
115776ab0f53SMel Gorman 
115876ab0f53SMel Gorman 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
115976ab0f53SMel Gorman 
116076ab0f53SMel Gorman 		zone = &pgdat->node_zones[zoneid];
116176ab0f53SMel Gorman 		if (!populated_zone(zone))
116276ab0f53SMel Gorman 			continue;
116376ab0f53SMel Gorman 
11647be62de9SRik van Riel 		cc->nr_freepages = 0;
11657be62de9SRik van Riel 		cc->nr_migratepages = 0;
11667be62de9SRik van Riel 		cc->zone = zone;
11677be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->freepages);
11687be62de9SRik van Riel 		INIT_LIST_HEAD(&cc->migratepages);
116976ab0f53SMel Gorman 
1170aad6ec37SDan Carpenter 		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
11717be62de9SRik van Riel 			compact_zone(zone, cc);
117276ab0f53SMel Gorman 
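		/*
		 * For targeted (order > 0) compaction, record the outcome: if the
		 * zone now meets its low watermark for this order, raise
		 * compact_order_failed past it; if a synchronous attempt still
		 * failed, defer further compaction at this order.
		 */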
1173aff62249SRik van Riel 		if (cc->order > 0) {
1174aff62249SRik van Riel 			int ok = zone_watermark_ok(zone, cc->order,
1175aff62249SRik van Riel 						low_wmark_pages(zone), 0, 0);
1176c81758fbSMinchan Kim 			if (ok && cc->order >= zone->compact_order_failed)
1177aff62249SRik van Riel 				zone->compact_order_failed = cc->order + 1;
1178aff62249SRik van Riel 			/* Currently async compaction is never deferred. */
117968e3e926SLinus Torvalds 			else if (!ok && cc->sync)
1180aff62249SRik van Riel 				defer_compaction(zone, cc->order);
1181aff62249SRik van Riel 		}
1182aff62249SRik van Riel 
11837be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->freepages));
11847be62de9SRik van Riel 		VM_BUG_ON(!list_empty(&cc->migratepages));
118576ab0f53SMel Gorman 	}
118676ab0f53SMel Gorman 
118776ab0f53SMel Gorman 	return 0;
118876ab0f53SMel Gorman }
118976ab0f53SMel Gorman 
11907be62de9SRik van Riel int compact_pgdat(pg_data_t *pgdat, int order)
11917be62de9SRik van Riel {
11927be62de9SRik van Riel 	struct compact_control cc = {
11937be62de9SRik van Riel 		.order = order,
119468e3e926SLinus Torvalds 		.sync = false,
11951fb3f8caSMel Gorman 		.page = NULL,
11967be62de9SRik van Riel 	};
11977be62de9SRik van Riel 
11987be62de9SRik van Riel 	return __compact_pgdat(pgdat, &cc);
11997be62de9SRik van Riel }
12007be62de9SRik van Riel 
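/*
 * Fully compact one node on behalf of the explicit triggers below.  An order
 * of -1 bypasses the deferral check in __compact_pgdat() and compacts every
 * populated zone to completion, synchronously.
 */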
12017be62de9SRik van Riel static int compact_node(int nid)
12027be62de9SRik van Riel {
12037be62de9SRik van Riel 	struct compact_control cc = {
12047be62de9SRik van Riel 		.order = -1,
120568e3e926SLinus Torvalds 		.sync = true,
12061fb3f8caSMel Gorman 		.page = NULL,
12077be62de9SRik van Riel 	};
12087be62de9SRik van Riel 
12098575ec29SHugh Dickins 	return __compact_pgdat(NODE_DATA(nid), &cc);
12107be62de9SRik van Riel }
12117be62de9SRik van Riel 
121276ab0f53SMel Gorman /* Compact all nodes in the system */
121376ab0f53SMel Gorman static int compact_nodes(void)
121476ab0f53SMel Gorman {
121576ab0f53SMel Gorman 	int nid;
121676ab0f53SMel Gorman 
12178575ec29SHugh Dickins 	/* Flush pending updates to the LRU lists */
12188575ec29SHugh Dickins 	lru_add_drain_all();
12198575ec29SHugh Dickins 
122076ab0f53SMel Gorman 	for_each_online_node(nid)
122176ab0f53SMel Gorman 		compact_node(nid);
122276ab0f53SMel Gorman 
122376ab0f53SMel Gorman 	return COMPACT_COMPLETE;
122476ab0f53SMel Gorman }
122576ab0f53SMel Gorman 
122676ab0f53SMel Gorman /* The written value is actually unused; all memory is compacted */
122776ab0f53SMel Gorman int sysctl_compact_memory;
122876ab0f53SMel Gorman 
122976ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
123076ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
123176ab0f53SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
123276ab0f53SMel Gorman {
123376ab0f53SMel Gorman 	if (write)
123476ab0f53SMel Gorman 		return compact_nodes();
123576ab0f53SMel Gorman 
123676ab0f53SMel Gorman 	return 0;
123776ab0f53SMel Gorman }
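
/*
 * Example of triggering compaction of all nodes from userspace:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * This is mainly useful for testing and for fragmentation experiments.
 */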
1238ed4a6d7fSMel Gorman 
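/*
 * Handler for /proc/sys/vm/extfrag_threshold (0-1000, default 500).
 * compaction_suitable(), earlier in this file, skips compaction in favour of
 * reclaim while a zone's fragmentation_index() is at or below this value.
 */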
12395e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
12405e771905SMel Gorman 			void __user *buffer, size_t *length, loff_t *ppos)
12415e771905SMel Gorman {
12425e771905SMel Gorman 	proc_dointvec_minmax(table, write, buffer, length, ppos);
12435e771905SMel Gorman 
12445e771905SMel Gorman 	return 0;
12455e771905SMel Gorman }
12465e771905SMel Gorman 
1247ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
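/*
 * Per-node trigger, exposed on NUMA systems as
 * /sys/devices/system/node/nodeN/compact; writing anything to it compacts
 * just that node.
 */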
124810fbcf4cSKay Sievers ssize_t sysfs_compact_node(struct device *dev,
124910fbcf4cSKay Sievers 			struct device_attribute *attr,
1250ed4a6d7fSMel Gorman 			const char *buf, size_t count)
1251ed4a6d7fSMel Gorman {
12528575ec29SHugh Dickins 	int nid = dev->id;
12538575ec29SHugh Dickins 
12548575ec29SHugh Dickins 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
12558575ec29SHugh Dickins 		/* Flush pending updates to the LRU lists */
12568575ec29SHugh Dickins 		lru_add_drain_all();
12578575ec29SHugh Dickins 
12588575ec29SHugh Dickins 		compact_node(nid);
12598575ec29SHugh Dickins 	}
1260ed4a6d7fSMel Gorman 
1261ed4a6d7fSMel Gorman 	return count;
1262ed4a6d7fSMel Gorman }
126310fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1264ed4a6d7fSMel Gorman 
1265ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1266ed4a6d7fSMel Gorman {
126710fbcf4cSKay Sievers 	return device_create_file(&node->dev, &dev_attr_compact);
1268ed4a6d7fSMel Gorman }
1269ed4a6d7fSMel Gorman 
1270ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1271ed4a6d7fSMel Gorman {
127210fbcf4cSKay Sievers 	return device_remove_file(&node->dev, &dev_attr_compact);
1273ed4a6d7fSMel Gorman }
1274ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1275ff9543fdSMichal Nazarewicz 
1276ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1277