/* xref: linux/mm/compaction.c (revision f40d1e42bb988d2a26e8e111ea4c4c7bac819b7e) */
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

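/*
 * Return every page on @freelist to the page allocator and report how
 * many pages were released.
 */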
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

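/*
 * split_free_page() hands back pages without the arch/debug page
 * mappings applied; restore them before the pages are used as
 * migration targets.
 */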
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

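/*
 * Async compaction only touches pageblocks that are cheap to take
 * pages from: MIGRATE_MOVABLE and MIGRATE_CMA.
 */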
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

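/*
 * A coarse lock should be dropped when this task needs to reschedule
 * or when another task is spinning on the lock.
 */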
static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}

/* Returns true if the page is within a block suitable as a migration target */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

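/*
 * If the caller asked for a page to be captured (cc->page is set) and
 * none has been captured yet, speculatively scan the free lists for a
 * page of at least cc->order and try to capture it under zone->lock.
 */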
static void compact_capture_page(struct compact_control *cc)
{
	unsigned long flags;
	int mtype, mtype_low, mtype_high;

	if (!cc->page || *cc->page)
		return;

	/*
	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
	 * regardless of the migratetype of the freelist it is captured from.
	 * This is fine because the order for a high-order MIGRATE_MOVABLE
	 * allocation is typically at least a pageblock size and overall
	 * fragmentation is not impaired. Other allocation types must
	 * capture pages from their own migratelist because otherwise they
	 * could pollute other pageblocks like MIGRATE_MOVABLE with
	 * difficult-to-move pages, making fragmentation worse overall.
	 */
	if (cc->migratetype == MIGRATE_MOVABLE) {
		mtype_low = 0;
		mtype_high = MIGRATE_PCPTYPES;
	} else {
		mtype_low = cc->migratetype;
		mtype_high = cc->migratetype + 1;
	}

	/* Speculatively examine the free lists without zone lock */
	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
		int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct page *page;
			struct free_area *area;
			area = &(cc->zone->free_area[order]);
			if (list_empty(&area->free_list[mtype]))
				continue;

			/* Take the lock and attempt capture of the page */
			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
				return;
			if (!list_empty(&area->free_list[mtype])) {
				page = list_entry(area->free_list[mtype].next,
							struct page, lru);
				if (capture_free_page(page, cc->order, mtype)) {
					spin_unlock_irqrestore(&cc->zone->lock,
									flags);
					*cc->page = page;
					return;
				}
			}
			spin_unlock_irqrestore(&cc->zone->lock, flags);
		}
	}
}

/*
 * Isolate free pages onto a private freelist. The zone->lock is taken
 * within this function as needed. If @strict is true, abort and return 0
 * if any invalid PFNs or non-free pages are found inside the pageblock
 * (even though some pages may still end up isolated).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long nr_strict_required = end_pfn - blockpfn;
	unsigned long flags;
	bool locked = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			continue;
		if (!PageBuddy(page))
			continue;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction, do not
		 * spin on the lock; acquire it as late as possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !suitable_migration_target(page))
			break;

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			break;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && nr_strict_required != total_isolated)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the
 * isolated pages (which may be greater than end_pfn if the end fell in
 * the middle of a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	struct zone *zone = NULL;
	LIST_HEAD(freelist);

	/* cc needed for isolate_freepages_block to acquire zone->lock */
	struct compact_control cc = {
		.sync = true,
	};

	if (pfn_valid(start_pfn))
		cc.zone = zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(&cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

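/*
 * Back off when more than half of the zone's LRU pages (active plus
 * inactive, anon plus file) are currently isolated from the LRU lists.
 */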
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal pending,
 * otherwise the PFN of the first page that was not scanned (which may be
 * less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;

		/* give a chance to irqs before checking need_resched() */
		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks.
		 * Async migration is optimistic: it checks whether the
		 * minimum amount of work will satisfy the allocation.
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			goto next_pageblock;
		}

		/* Check may be lockless but that's ok as we recheck later */
		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set. lru_lock normally excludes isolation,
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn += pageblock_nr_pages;
		low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
		last_pageblock_nr = pageblock_nr;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Returns the start pfn of the last pageblock in a zone. This is the
 * starting point for full compaction of a zone. Compaction searches for
 * free pages from the end of each zone, while isolate_freepages_block
 * scans forward inside each pageblock.
 */
static unsigned long start_free_pfn(struct zone *zone)
{
	unsigned long free_pfn;
	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
	free_pfn &= ~(pageblock_nr_pages-1);
	return free_pfn;
}

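/*
 * Worked example (illustrative; assumes pageblock_nr_pages == 512, i.e.
 * x86 with 4KiB base pages and 2MiB pageblocks): for a zone with
 * zone_start_pfn = 0x10000 and spanned_pages = 0x23ff, the one-past-end
 * pfn is 0x123ff; masking with ~(512 - 1) yields 0x12200, the first pfn
 * of the last complete pageblock, where the free scanner starts.
 */
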
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = 0;
		end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			high_pfn = max(high_pfn, pfn);

			/*
			 * If the free scanner has wrapped, update
			 * compact_cached_free_pfn to point to the highest
			 * pageblock with free pages. This reduces excessive
			 * scanning of full pageblocks near the end of the
			 * zone
			 */
			if (cc->order > 0 && cc->wrapped)
				zone->compact_cached_free_pfn = high_pfn;
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;

	/* If compact_cached_free_pfn is reset then set it now */
	if (cc->order > 0 && !cc->wrapped &&
			zone->compact_cached_free_pfn == start_free_pfn(zone))
		zone->compact_cached_free_pfn = high_pfn;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/*
	 * A full (order == -1) compaction run starts at the beginning and
	 * end of a zone; it completes when the migrate and free scanner meet.
	 * A partial (order > 0) compaction can start with the free scanner
	 * at a random point in the zone, and may have to restart.
	 */
	if (cc->free_pfn <= cc->migrate_pfn) {
		if (cc->order > 0 && !cc->wrapped) {
			/* We started partway through; restart at the end. */
			unsigned long free_pfn = start_free_pfn(zone);
			zone->compact_cached_free_pfn = free_pfn;
			cc->free_pfn = free_pfn;
			cc->wrapped = 1;
			return COMPACT_CONTINUE;
		}
		return COMPACT_COMPLETE;
	}

	/* We wrapped around and ended up where we started. */
	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	if (cc->page) {
		/* Was a suitable page captured? */
		if (*cc->page)
			return COMPACT_PARTIAL;
	} else {
		unsigned int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			/* Job done if page is free of the right migratetype */
			if (!list_empty(&area->free_list[cc->migratetype]))
				return COMPACT_PARTIAL;

			/* Job done if allocation would set block type */
			if (order >= pageblock_order && area->nr_free)
				return COMPACT_PARTIAL;
		}
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

8183957c776SMichal Hocko 	/*
8193e7d3449SMel Gorman 	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
8203e7d3449SMel Gorman 	 * This is because during migration, copies of pages need to be
8213e7d3449SMel Gorman 	 * allocated and for a short time, the footprint is higher
8223e7d3449SMel Gorman 	 */
8233e7d3449SMel Gorman 	watermark = low_wmark_pages(zone) + (2UL << order);
8243e7d3449SMel Gorman 	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
8253e7d3449SMel Gorman 		return COMPACT_SKIPPED;
8263e7d3449SMel Gorman 
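	/*
	 * Illustrative arithmetic (assuming 4KiB base pages): for an
	 * order-3 request, 2UL << 3 adds 16 pages (64KiB) of headroom on
	 * top of the low watermark before compaction is attempted.
	 */
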
	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

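/*
 * Run a single compaction pass over @zone as described by @cc:
 * repeatedly isolate migratable pages and migrate them towards free
 * pages taken from the end of the zone, until compact_finished()
 * signals completion or an isolation step aborts.
 */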
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;

	if (cc->order > 0) {
		/* Incremental compaction. Start where the last one stopped. */
		cc->free_pfn = zone->compact_cached_free_pfn;
		cc->start_free_pfn = cc->free_pfn;
	} else {
		/* Order == -1 starts at the end of the zone. */
		cc->free_pfn = start_free_pfn(zone);
	}

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			if (err == -ENOMEM) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

		/* Capture a page now if it is a suitable size */
		compact_capture_page(cc);
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

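/*
 * Helper for try_to_compact_pages(): set up a compact_control for a
 * single zone, run compact_zone() on it, and report back whether the
 * run was aborted because of lock contention.
 */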
static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended,
				 struct page **page)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.page = page,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @page: Optionally capture a free page of the requested order during compaction
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended, struct page **page)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended, page);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order >= zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

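/*
 * compact_pgdat() performs opportunistic, asynchronous compaction of a
 * node's zones for a given allocation order; compact_node() fully
 * compacts one node (order == -1) with synchronous migration.
 */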
int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
		.page = NULL,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.page = NULL,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	/* Propagate any error from the proc parser instead of dropping it */
	return proc_dointvec_minmax(table, write, buffer, length, ppos);
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */