xref: /linux/mm/compaction.c (revision 68e3e92620c323703bc7db75c2ba15239ee85c39)
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

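/*
 * (Illustrative note, inferred from the "split_free_page does not map
 * the pages" comments later in this file: split_free_page() skips the
 * debug-pagealloc style mapping of the pages it hands back, so callers
 * push their private freelists through map_pages() before using them.)
 */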
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, abort and return 0 on any invalid PFN or non-free
 * page inside the pageblock (even though some pages may already have been
 * isolated by then).
 */
static unsigned long isolate_freepages_block(unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn)) {
			if (strict)
				return 0;
			continue;
		}
		nr_scanned++;

		if (!PageBuddy(page)) {
			if (strict)
				return 0;
			continue;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			return 0;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
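		/*
		 * (Illustrative: splitting an order-3 buddy page yields
		 * isolated == 8, so the two lines below advance 7 PFNs
		 * and the loop increment steps over the eighth.)
		 */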
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-the-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the
 * function to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the
 * isolated pages (which may be greater than end_pfn if the end fell
 * in the middle of a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn, flags;
	struct zone *zone = NULL;
	LIST_HEAD(freelist);

	if (pfn_valid(start_pfn))
		zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		spin_lock_irqsave(&zone->lock, flags);
		isolated = isolate_freepages_block(pfn, block_end_pfn,
						   &freelist, true);
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
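
#if 0	/* Illustrative sketch only; not part of the original file. */
/*
 * A hypothetical CMA-style caller of isolate_freepages_range(). All of
 * the names here are invented for illustration. On success the pages in
 * [start, end) have been split to order-0 and moved off the buddy lists;
 * on failure the function has already released anything it isolated.
 */
static int example_take_free_range(unsigned long start, unsigned long end)
{
	unsigned long outer_end = isolate_freepages_range(start, end);

	if (!outer_end)
		return -EBUSY;	/* hole or in-use page inside the range */

	/*
	 * outer_end may exceed 'end' if a free page straddled the end of
	 * the range; a real caller would free the [end, outer_end) tail.
	 */
	return 0;
}
#endif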

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

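	/*
	 * Illustrative numbers: with 4000 inactive and 6000 active LRU
	 * pages, we report "too many" once more than (4000 + 6000) / 2 =
	 * 5000 pages are isolated at the same time.
	 */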
	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		bool locked = true;

		/* give a chance to irqs before checking need_resched() */
		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irq(&zone->lru_lock);
			locked = false;
		}
		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
			if (locked)
				spin_unlock_irq(&zone->lru_lock);
			cond_resched();
			spin_lock_irq(&zone->lru_lock);
			if (fatal_signal_pending(current))
				break;
		} else if (!locked)
			spin_lock_irq(&zone->lru_lock);

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, only scan within MOVABLE blocks.
		 * Async migration is optimistic: it checks whether the
		 * minimum amount of work satisfies the allocation.
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
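			/*
			 * Illustrative arithmetic: with pageblock_nr_pages
			 * == 512 and low_pfn == 1024, the two lines above
			 * leave low_pfn at 1535, so the loop's low_pfn++
			 * resumes the scan at 1536, the start of the next
			 * pageblock.
			 */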
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try to isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the
	 * zone, the free scanner does not accidentally move to the next
	 * zone in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that pages within a zone's range of
		 * PFNs do not all belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/*
		 * Found a block suitable for isolating free pages from.
		 * Once interrupts are disabled and the zone lock is held,
		 * double check that the block is still suitable and
		 * isolate the pages. This is to minimise the time IRQs
		 * are disabled.
		 */
		isolated = 0;
		spin_lock_irqsave(&zone->lock, flags);
		if (suitable_migration_target(page)) {
			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
			isolated = isolate_freepages_block(pfn, end_pfn,
							   freelist, false);
			nr_freepages += isolated;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);
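	/*
	 * Illustrative arithmetic: for an order-3 request the target is
	 * low_wmark_pages(zone) + 8 pages, i.e. one high-order page's
	 * worth of free memory above the low watermark.
	 */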

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
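	/*
	 * Illustrative arithmetic: for an order-4 request this asks for
	 * low_wmark_pages(zone) + (2UL << 4) = low watermark + 32 free
	 * pages, leaving room for the migration copies.
	 */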
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/*
	 * Check whether it is worth even starting compaction. The order
	 * check is made because we assume the page allocator can satisfy
	 * the "cheaper" orders without taking special steps.
	 */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;
	}

	return rc;
}
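
#if 0	/* Illustrative sketch only; not part of the original file. */
/*
 * A hypothetical caller, loosely modelled on the page allocator's
 * direct-compaction path (the name and flow here are invented for the
 * sketch): compact asynchronously, then retry the high-order allocation.
 */
static struct page *example_compact_then_alloc(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned long rc;

	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
				  false);
	if (rc == COMPACT_SKIPPED)
		return NULL;	/* compaction was not even attempted */

	return __alloc_pages_nodemask(gfp_mask, order, zonelist, nodemask);
}
#endif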

/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order > zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
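
/*
 * Usage example (illustrative): any write to the sysctl triggers a full
 * compaction of every online node; the value written is ignored:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */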

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
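
/*
 * Usage example (illustrative): compact the memory of a single node,
 * e.g. node 0, via its sysfs attribute:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */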

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */