xref: /linux/mm/compaction.c (revision a671de086874b9d8155369319b2bd989cf55d77c)
1 /*
2  * linux/mm/compaction.c
3  *
4  * Memory compaction for the reduction of external fragmentation. Note that
5  * this heavily depends upon page migration to do all the real heavy
6  * lifting
7  *
8  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9  */
10 #include <linux/swap.h>
11 #include <linux/migrate.h>
12 #include <linux/compaction.h>
13 #include <linux/mm_inline.h>
14 #include <linux/backing-dev.h>
15 #include <linux/sysctl.h>
16 #include <linux/sysfs.h>
17 #include <linux/balloon_compaction.h>
18 #include "internal.h"
19 
20 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
21 
22 #define CREATE_TRACE_POINTS
23 #include <trace/events/compaction.h>
24 
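/* Return the isolated pages on @freelist to the page allocator and count them */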
25 static unsigned long release_freepages(struct list_head *freelist)
26 {
27 	struct page *page, *next;
28 	unsigned long count = 0;
29 
30 	list_for_each_entry_safe(page, next, freelist, lru) {
31 		list_del(&page->lru);
32 		__free_page(page);
33 		count++;
34 	}
35 
36 	return count;
37 }
38 
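/*
 * split_free_page() skips the usual post-allocation preparation, so apply
 * the arch hook and kernel mapping to each isolated page before it is used.
 */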
39 static void map_pages(struct list_head *list)
40 {
41 	struct page *page;
42 
43 	list_for_each_entry(page, list, lru) {
44 		arch_alloc_page(page, 0);
45 		kernel_map_pages(page, 1, 1);
46 	}
47 }
48 
49 static inline bool migrate_async_suitable(int migratetype)
50 {
51 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
52 }
53 
54 #ifdef CONFIG_COMPACTION
55 /* Returns true if the pageblock should be scanned for pages to isolate. */
56 static inline bool isolation_suitable(struct compact_control *cc,
57 					struct page *page)
58 {
59 	if (cc->ignore_skip_hint)
60 		return true;
61 
62 	return !get_pageblock_skip(page);
63 }
64 
65 /*
66  * This function is called to clear all cached information on pageblocks that
67  * should be skipped for page isolation when the migrate and free page scanner
68  * meet.
69  */
70 static void __reset_isolation_suitable(struct zone *zone)
71 {
72 	unsigned long start_pfn = zone->zone_start_pfn;
73 	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
74 	unsigned long pfn;
75 
76 	zone->compact_cached_migrate_pfn = start_pfn;
77 	zone->compact_cached_free_pfn = end_pfn;
78 	zone->compact_blockskip_flush = false;
79 
80 	/* Walk the zone and mark every pageblock as suitable for isolation */
81 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
82 		struct page *page;
83 
84 		cond_resched();
85 
86 		if (!pfn_valid(pfn))
87 			continue;
88 
89 		page = pfn_to_page(pfn);
90 		if (zone != page_zone(page))
91 			continue;
92 
93 		clear_pageblock_skip(page);
94 	}
95 }
96 
97 void reset_isolation_suitable(pg_data_t *pgdat)
98 {
99 	int zoneid;
100 
101 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
102 		struct zone *zone = &pgdat->node_zones[zoneid];
103 		if (!populated_zone(zone))
104 			continue;
105 
106 		/* Only flush if a full compaction finished recently */
107 		if (zone->compact_blockskip_flush)
108 			__reset_isolation_suitable(zone);
109 	}
110 }
111 
112 /*
113  * If no pages were isolated then mark this pageblock to be skipped in the
114  * future. The information is later cleared by __reset_isolation_suitable().
115  */
116 static void update_pageblock_skip(struct compact_control *cc,
117 			struct page *page, unsigned long nr_isolated,
118 			bool migrate_scanner)
119 {
120 	struct zone *zone = cc->zone;
121 	if (!page)
122 		return;
123 
124 	if (!nr_isolated) {
125 		unsigned long pfn = page_to_pfn(page);
126 		set_pageblock_skip(page);
127 
128 		/* Update where compaction should restart */
129 		if (migrate_scanner) {
130 			if (!cc->finished_update_migrate &&
131 			    pfn > zone->compact_cached_migrate_pfn)
132 				zone->compact_cached_migrate_pfn = pfn;
133 		} else {
134 			if (!cc->finished_update_free &&
135 			    pfn < zone->compact_cached_free_pfn)
136 				zone->compact_cached_free_pfn = pfn;
137 		}
138 	}
139 }
140 #else
141 static inline bool isolation_suitable(struct compact_control *cc,
142 					struct page *page)
143 {
144 	return true;
145 }
146 
147 static void update_pageblock_skip(struct compact_control *cc,
148 			struct page *page, unsigned long nr_isolated,
149 			bool migrate_scanner)
150 {
151 }
152 #endif /* CONFIG_COMPACTION */
153 
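/* A coarse lock should be dropped when a reschedule is due or it is contended */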
154 static inline bool should_release_lock(spinlock_t *lock)
155 {
156 	return need_resched() || spin_is_contended(lock);
157 }
158 
159 /*
160  * Compaction requires the taking of some coarse locks that are potentially
161  * very heavily contended. Check if the process needs to be scheduled or
162  * if the lock is contended. For async compaction, back out in the event
163  * if the lock is contended. For async compaction, back out if contention
164  * is severe. For sync compaction, schedule.
165  *
166  * Returns true if the lock is held.
167  * Returns false if the lock is released and compaction should abort.
 */
168 static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
169 				      bool locked, struct compact_control *cc)
170 {
171 	if (should_release_lock(lock)) {
172 		if (locked) {
173 			spin_unlock_irqrestore(lock, *flags);
174 			locked = false;
175 		}
176 
177 		/* async aborts if taking too long or contended */
178 		if (!cc->sync) {
179 			cc->contended = true;
180 			return false;
181 		}
182 
183 		cond_resched();
184 	}
185 
186 	if (!locked)
187 		spin_lock_irqsave(lock, *flags);
188 	return true;
189 }
190 
191 static inline bool compact_trylock_irqsave(spinlock_t *lock,
192 			unsigned long *flags, struct compact_control *cc)
193 {
194 	return compact_checklock_irqsave(lock, flags, false, cc);
195 }
196 
197 /* Returns true if the page is within a block suitable for migration to */
198 static bool suitable_migration_target(struct page *page)
199 {
200 	int migratetype = get_pageblock_migratetype(page);
201 
202 	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
203 	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
204 		return false;
205 
206 	/* If the page is a large free page, then allow migration */
207 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
208 		return true;
209 
210 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
211 	if (migrate_async_suitable(migratetype))
212 		return true;
213 
214 	/* Otherwise skip the block */
215 	return false;
216 }
217 
218 static void compact_capture_page(struct compact_control *cc)
219 {
220 	unsigned long flags;
221 	int mtype, mtype_low, mtype_high;
222 
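	/* No capture was requested, or a suitable page has already been captured */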
223 	if (!cc->page || *cc->page)
224 		return;
225 
226 	/*
227 	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
228 	 * regardless of the migratetype of the freelist it is captured from.
229 	 * This is fine because the order for a high-order MIGRATE_MOVABLE
230 	 * allocation is typically at least a pageblock size and overall
231 	 * fragmentation is not impaired. Other allocation types must
232 	 * capture pages from their own migratelist because otherwise they
233 	 * could pollute other pageblocks like MIGRATE_MOVABLE with
234 	 * difficult-to-move pages, making fragmentation worse overall.
235 	 */
236 	if (cc->migratetype == MIGRATE_MOVABLE) {
237 		mtype_low = 0;
238 		mtype_high = MIGRATE_PCPTYPES;
239 	} else {
240 		mtype_low = cc->migratetype;
241 		mtype_high = cc->migratetype + 1;
242 	}
243 
244 	/* Speculatively examine the free lists without zone lock */
245 	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
246 		int order;
247 		for (order = cc->order; order < MAX_ORDER; order++) {
248 			struct page *page;
249 			struct free_area *area;
250 			area = &(cc->zone->free_area[order]);
251 			if (list_empty(&area->free_list[mtype]))
252 				continue;
253 
254 			/* Take the lock and attempt capture of the page */
255 			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
256 				return;
257 			if (!list_empty(&area->free_list[mtype])) {
258 				page = list_entry(area->free_list[mtype].next,
259 							struct page, lru);
260 				if (capture_free_page(page, cc->order, mtype)) {
261 					spin_unlock_irqrestore(&cc->zone->lock,
262 									flags);
263 					*cc->page = page;
264 					return;
265 				}
266 			}
267 			spin_unlock_irqrestore(&cc->zone->lock, flags);
268 		}
269 	}
270 }
271 
272 /*
273  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
274  * If @strict is true, abort and return 0 on any invalid PFNs or non-free
275  * pages inside the pageblock (even though it may still end up isolating
276  * some pages).
277  */
278 static unsigned long isolate_freepages_block(struct compact_control *cc,
279 				unsigned long blockpfn,
280 				unsigned long end_pfn,
281 				struct list_head *freelist,
282 				bool strict)
283 {
284 	int nr_scanned = 0, total_isolated = 0;
285 	struct page *cursor, *valid_page = NULL;
286 	unsigned long nr_strict_required = end_pfn - blockpfn;
287 	unsigned long flags;
288 	bool locked = false;
289 
290 	cursor = pfn_to_page(blockpfn);
291 
292 	/* Isolate free pages. */
293 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
294 		int isolated, i;
295 		struct page *page = cursor;
296 
297 		nr_scanned++;
298 		if (!pfn_valid_within(blockpfn))
299 			continue;
300 		if (!valid_page)
301 			valid_page = page;
302 		if (!PageBuddy(page))
303 			continue;
304 
305 		/*
306 		 * The zone lock must be held to isolate freepages.
307 		 * Unfortunately this is a very coarse lock and can be
308 		 * heavily contended if there are parallel allocations
309 		 * or parallel compactions. For async compaction, do not
310 		 * spin on the lock; in all cases the lock is acquired as late
311 		 * as possible.
312 		 */
313 		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
314 								locked, cc);
315 		if (!locked)
316 			break;
317 
318 		/* Recheck this is a suitable migration target under lock */
319 		if (!strict && !suitable_migration_target(page))
320 			break;
321 
322 		/* Recheck this is a buddy page under lock */
323 		if (!PageBuddy(page))
324 			continue;
325 
326 		/* Found a free page, break it into order-0 pages */
327 		isolated = split_free_page(page);
328 		if (!isolated && strict)
329 			break;
330 		total_isolated += isolated;
331 		for (i = 0; i < isolated; i++) {
332 			list_add(&page->lru, freelist);
333 			page++;
334 		}
335 
336 		/* If a page was split, advance to the end of it */
337 		if (isolated) {
338 			blockpfn += isolated - 1;
339 			cursor += isolated - 1;
340 		}
341 	}
342 
343 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
344 
345 	/*
346 	 * If strict isolation is requested by CMA then check that all the
347 	 * pages requested were isolated. If there were any failures, 0 is
348 	 * returned and CMA will fail.
349 	 */
350 	if (strict && nr_strict_required > total_isolated)
351 		total_isolated = 0;
352 
353 	if (locked)
354 		spin_unlock_irqrestore(&cc->zone->lock, flags);
355 
356 	/* Update the pageblock-skip if the whole pageblock was scanned */
357 	if (blockpfn == end_pfn)
358 		update_pageblock_skip(cc, valid_page, total_isolated, false);
359 
360 	return total_isolated;
361 }
362 
363 /**
364  * isolate_freepages_range() - isolate free pages.
365  * @start_pfn: The first PFN to start isolating.
366  * @end_pfn:   The one-past-last PFN.
367  *
368  * Non-free pages, invalid PFNs, or zone boundaries within the
369  * [start_pfn, end_pfn) range are considered errors and cause the function
370  * to undo its actions and return zero.
371  *
372  * Otherwise, the function returns the one-past-the-last PFN of the
373  * isolated pages (which may be greater than end_pfn if the end fell in
374  * the middle of a free page).
375  */
376 unsigned long
377 isolate_freepages_range(struct compact_control *cc,
378 			unsigned long start_pfn, unsigned long end_pfn)
379 {
380 	unsigned long isolated, pfn, block_end_pfn;
381 	LIST_HEAD(freelist);
382 
383 	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
384 		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
385 			break;
386 
387 		/*
388 		 * On subsequent iterations ALIGN() is actually not needed,
389 		 * but we keep it so as not to complicate the code.
390 		 */
391 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
392 		block_end_pfn = min(block_end_pfn, end_pfn);
393 
394 		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
395 						   &freelist, true);
396 
397 		/*
398 		 * In strict mode, isolate_freepages_block() returns 0 if
399 		 * there are any holes in the block (ie. invalid PFNs or
400 		 * non-free pages).
401 		 */
402 		if (!isolated)
403 			break;
404 
405 		/*
406 		 * If we managed to isolate pages, it is always (1 << n) *
407 		 * pageblock_nr_pages for some non-negative n.  (Max order
408 		 * page may span two pageblocks).
409 		 */
410 	}
411 
412 	/* split_free_page does not map the pages */
413 	map_pages(&freelist);
414 
415 	if (pfn < end_pfn) {
416 		/* Loop terminated early, cleanup. */
417 		release_freepages(&freelist);
418 		return 0;
419 	}
420 
421 	/* We don't use freelists for anything. */
422 	return pfn;
423 }
424 
425 /* Update the number of anon and file isolated pages in the zone */
426 static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
427 {
428 	struct page *page;
429 	unsigned int count[2] = { 0, };
430 
431 	list_for_each_entry(page, &cc->migratepages, lru)
432 		count[!!page_is_file_cache(page)]++;
433 
434 	/* If locked we can use the interrupt unsafe versions */
435 	if (locked) {
436 		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
437 		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
438 	} else {
439 		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
440 		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
441 	}
442 }
443 
444 /* Similar to reclaim, but different enough that they don't share logic */
445 static bool too_many_isolated(struct zone *zone)
446 {
447 	unsigned long active, inactive, isolated;
448 
449 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
450 					zone_page_state(zone, NR_INACTIVE_ANON);
451 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
452 					zone_page_state(zone, NR_ACTIVE_ANON);
453 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
454 					zone_page_state(zone, NR_ISOLATED_ANON);
455 
456 	return isolated > (inactive + active) / 2;
457 }
458 
459 /**
460  * isolate_migratepages_range() - isolate all migrate-able pages in range.
461  * @zone:	Zone pages are in.
462  * @cc:		Compaction control structure.
463  * @low_pfn:	The first PFN of the range.
464  * @end_pfn:	The one-past-the-last PFN of the range.
465  * @unevictable: true if unevictable pages may be isolated
466  *
467  * Isolate all pages that can be migrated from the range specified by
468  * [low_pfn, end_pfn). Returns zero if there is a fatal signal pending,
469  * otherwise the PFN of the first page that was not scanned (which may
470  * be less than, equal to, or greater than end_pfn).
471  *
472  * Assumes that cc->migratepages is empty and cc->nr_migratepages is
473  * zero.
474  *
475  * Apart from cc->migratepages and cc->nr_migratepages this function
476  * does not modify any cc's fields, in particular it does not modify
477  * (or read for that matter) cc->migrate_pfn.
478  */
479 unsigned long
480 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
481 		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
482 {
483 	unsigned long last_pageblock_nr = 0, pageblock_nr;
484 	unsigned long nr_scanned = 0, nr_isolated = 0;
485 	struct list_head *migratelist = &cc->migratepages;
486 	isolate_mode_t mode = 0;
487 	struct lruvec *lruvec;
488 	unsigned long flags;
489 	bool locked = false;
490 	struct page *page = NULL, *valid_page = NULL;
491 
492 	/*
493 	 * Ensure that there are not too many pages isolated from the LRU
494 	 * list by either parallel reclaimers or compaction. If there are,
495 	 * delay for some time until fewer pages are isolated
496 	 */
497 	while (unlikely(too_many_isolated(zone))) {
498 		/* async migration should just abort */
499 		if (!cc->sync)
500 			return 0;
501 
502 		congestion_wait(BLK_RW_ASYNC, HZ/10);
503 
504 		if (fatal_signal_pending(current))
505 			return 0;
506 	}
507 
508 	/* Time to isolate some pages for migration */
509 	cond_resched();
510 	for (; low_pfn < end_pfn; low_pfn++) {
511 		/* give a chance to irqs before checking need_resched() */
512 		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
513 			if (should_release_lock(&zone->lru_lock)) {
514 				spin_unlock_irqrestore(&zone->lru_lock, flags);
515 				locked = false;
516 			}
517 		}
518 
519 		/*
520 		 * migrate_pfn does not necessarily start aligned to a
521 		 * pageblock. Ensure that pfn_valid is called when moving
522 		 * into a new MAX_ORDER_NR_PAGES range in case of large
523 		 * memory holes within the zone
524 		 */
525 		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
526 			if (!pfn_valid(low_pfn)) {
527 				low_pfn += MAX_ORDER_NR_PAGES - 1;
528 				continue;
529 			}
530 		}
531 
532 		if (!pfn_valid_within(low_pfn))
533 			continue;
534 		nr_scanned++;
535 
536 		/*
537 		 * Get the page and ensure the page is within the same zone.
538 		 * See the comment in isolate_freepages about overlapping
539 		 * nodes. It is deliberate that the new zone lock is not taken
540 		 * as memory compaction should not move pages between nodes.
541 		 */
542 		page = pfn_to_page(low_pfn);
543 		if (page_zone(page) != zone)
544 			continue;
545 
546 		if (!valid_page)
547 			valid_page = page;
548 
549 		/* If isolation recently failed, do not retry */
550 		pageblock_nr = low_pfn >> pageblock_order;
551 		if (!isolation_suitable(cc, page))
552 			goto next_pageblock;
553 
554 		/* Skip if free */
555 		if (PageBuddy(page))
556 			continue;
557 
558 		/*
559 		 * For async migration, also only scan within MOVABLE blocks. Async
560 		 * migration is optimistic, checking whether the minimum amount of
561 		 * work satisfies the allocation.
562 		 */
563 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
564 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
565 			cc->finished_update_migrate = true;
566 			goto next_pageblock;
567 		}
568 
569 		/*
570 		 * Check may be lockless but that's ok as we recheck later.
571 		 * It's possible to migrate LRU pages and balloon pages;
572 		 * skip any other type of page.
573 		 */
574 		if (!PageLRU(page)) {
575 			if (unlikely(balloon_page_movable(page))) {
576 				if (locked && balloon_page_isolate(page)) {
577 					/* Successfully isolated */
578 					cc->finished_update_migrate = true;
579 					list_add(&page->lru, migratelist);
580 					cc->nr_migratepages++;
581 					nr_isolated++;
582 					goto check_compact_cluster;
583 				}
584 			}
585 			continue;
586 		}
587 
588 		/*
589 		 * PageLRU is set. lru_lock normally excludes isolation
590 		 * splitting and collapsing (collapsing has already happened
591 		 * if PageLRU is set) but the lock is not necessarily taken
592 		 * here and it is wasteful to take it just to check transhuge.
593 		 * Check TransHuge without lock and skip the whole pageblock if
594 		 * it's either a transhuge or hugetlbfs page, as calling
595 		 * compound_order() without preventing THP from splitting the
596 		 * page underneath us may return surprising results.
597 		 */
598 		if (PageTransHuge(page)) {
599 			if (!locked)
600 				goto next_pageblock;
601 			low_pfn += (1 << compound_order(page)) - 1;
602 			continue;
603 		}
604 
605 		/* Check if it is ok to still hold the lock */
606 		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
607 								locked, cc);
608 		if (!locked || fatal_signal_pending(current))
609 			break;
610 
611 		/* Recheck PageLRU and PageTransHuge under lock */
612 		if (!PageLRU(page))
613 			continue;
614 		if (PageTransHuge(page)) {
615 			low_pfn += (1 << compound_order(page)) - 1;
616 			continue;
617 		}
618 
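		/* Build the isolation mode passed to __isolate_lru_page() */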
619 		if (!cc->sync)
620 			mode |= ISOLATE_ASYNC_MIGRATE;
621 
622 		if (unevictable)
623 			mode |= ISOLATE_UNEVICTABLE;
624 
625 		lruvec = mem_cgroup_page_lruvec(page, zone);
626 
627 		/* Try isolate the page */
628 		if (__isolate_lru_page(page, mode) != 0)
629 			continue;
630 
631 		VM_BUG_ON(PageTransCompound(page));
632 
633 		/* Successfully isolated */
634 		cc->finished_update_migrate = true;
635 		del_page_from_lru_list(page, lruvec, page_lru(page));
636 		list_add(&page->lru, migratelist);
637 		cc->nr_migratepages++;
638 		nr_isolated++;
639 
640 check_compact_cluster:
641 		/* Avoid isolating too much */
642 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
643 			++low_pfn;
644 			break;
645 		}
646 
647 		continue;
648 
649 next_pageblock:
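		/* Skip the rest of this pageblock; the loop increment moves to the next one */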
650 		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
652 		last_pageblock_nr = pageblock_nr;
653 	}
654 
655 	acct_isolated(zone, locked, cc);
656 
657 	if (locked)
658 		spin_unlock_irqrestore(&zone->lru_lock, flags);
659 
660 	/* Update the pageblock-skip if the whole pageblock was scanned */
661 	if (low_pfn == end_pfn)
662 		update_pageblock_skip(cc, valid_page, nr_isolated, true);
663 
664 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
665 
666 	return low_pfn;
667 }
668 
669 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
670 #ifdef CONFIG_COMPACTION
671 /*
672  * Based on information in the current compact_control, find blocks
673  * suitable for isolating free pages from and then isolate them.
674  */
675 static void isolate_freepages(struct zone *zone,
676 				struct compact_control *cc)
677 {
678 	struct page *page;
679 	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
680 	int nr_freepages = cc->nr_freepages;
681 	struct list_head *freelist = &cc->freepages;
682 
683 	/*
684 	 * Initialise the free scanner. The starting point is where we last
685 	 * scanned from (or the end of the zone if starting). The low point
686 	 * is the end of the pageblock the migration scanner is using.
687 	 */
688 	pfn = cc->free_pfn;
689 	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
690 
691 	/*
692 	 * Take care that if the migration scanner is at the end of the zone
693 	 * that the free scanner does not accidentally move to the next zone
694 	 * in the next isolation cycle.
695 	 */
696 	high_pfn = min(low_pfn, pfn);
697 
698 	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
699 
700 	/*
701 	 * Isolate free pages until enough are available to migrate the
702 	 * pages on cc->migratepages. We stop searching if the migrate
703 	 * and free page scanners meet or enough free pages are isolated.
704 	 */
705 	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
706 					pfn -= pageblock_nr_pages) {
707 		unsigned long isolated;
708 
709 		if (!pfn_valid(pfn))
710 			continue;
711 
712 		/*
713 		 * Check for overlapping nodes/zones. It's possible on some
714 		 * configurations to have a setup like
715 		 * node0 node1 node0
716 		 * i.e. it's possible that not all pages in a zone's range of
717 		 * pages belong to that single zone.
718 		 */
719 		page = pfn_to_page(pfn);
720 		if (page_zone(page) != zone)
721 			continue;
722 
723 		/* Check the block is suitable for migration */
724 		if (!suitable_migration_target(page))
725 			continue;
726 
727 		/* If isolation recently failed, do not retry */
728 		if (!isolation_suitable(cc, page))
729 			continue;
730 
731 		/* Found a block suitable for isolating free pages from */
732 		isolated = 0;
733 
734 		/*
735 		 * As pfn may not start aligned, pfn + pageblock_nr_pages
736 		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
737 		 * a pfn_valid check. Ensure isolate_freepages_block()
738 		 * only scans within a pageblock
739 		 */
740 		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
741 		end_pfn = min(end_pfn, zone_end_pfn);
742 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
743 						   freelist, false);
744 		nr_freepages += isolated;
745 
746 		/*
747 		 * Record the highest PFN we isolated pages from. When next
748 		 * looking for free pages, the search will restart here as
749 		 * page migration may have returned some pages to the allocator
750 		 */
751 		if (isolated) {
752 			cc->finished_update_free = true;
753 			high_pfn = max(high_pfn, pfn);
754 		}
755 	}
756 
757 	/* split_free_page does not map the pages */
758 	map_pages(freelist);
759 
760 	cc->free_pfn = high_pfn;
761 	cc->nr_freepages = nr_freepages;
762 }
763 
764 /*
765  * This is a migrate-callback that "allocates" freepages by taking pages
766  * from the isolated freelists in the block we are migrating to.
767  */
768 static struct page *compaction_alloc(struct page *migratepage,
769 					unsigned long data,
770 					int **result)
771 {
772 	struct compact_control *cc = (struct compact_control *)data;
773 	struct page *freepage;
774 
775 	/* Isolate free pages if necessary */
776 	if (list_empty(&cc->freepages)) {
777 		isolate_freepages(cc->zone, cc);
778 
779 		if (list_empty(&cc->freepages))
780 			return NULL;
781 	}
782 
783 	freepage = list_entry(cc->freepages.next, struct page, lru);
784 	list_del(&freepage->lru);
785 	cc->nr_freepages--;
786 
787 	return freepage;
788 }
789 
790 /*
791  * We cannot control nr_migratepages and nr_freepages fully when migration is
792  * running as migrate_pages() has no knowledge of compact_control. When
793  * migration is complete, we count the number of pages on the lists by hand.
794  */
795 static void update_nr_listpages(struct compact_control *cc)
796 {
797 	int nr_migratepages = 0;
798 	int nr_freepages = 0;
799 	struct page *page;
800 
801 	list_for_each_entry(page, &cc->migratepages, lru)
802 		nr_migratepages++;
803 	list_for_each_entry(page, &cc->freepages, lru)
804 		nr_freepages++;
805 
806 	cc->nr_migratepages = nr_migratepages;
807 	cc->nr_freepages = nr_freepages;
808 }
809 
810 /* possible outcome of isolate_migratepages */
811 typedef enum {
812 	ISOLATE_ABORT,		/* Abort compaction now */
813 	ISOLATE_NONE,		/* No pages isolated, continue scanning */
814 	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
815 } isolate_migrate_t;
816 
817 /*
818  * Isolate all pages that can be migrated from the block pointed to by
819  * the migrate scanner within compact_control.
820  */
821 static isolate_migrate_t isolate_migratepages(struct zone *zone,
822 					struct compact_control *cc)
823 {
824 	unsigned long low_pfn, end_pfn;
825 
826 	/* Do not scan outside zone boundaries */
827 	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
828 
829 	/* Only scan within a pageblock boundary */
830 	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
831 
832 	/* Do not cross the free scanner or scan within a memory hole */
833 	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
834 		cc->migrate_pfn = end_pfn;
835 		return ISOLATE_NONE;
836 	}
837 
838 	/* Perform the isolation */
839 	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
840 	if (!low_pfn || cc->contended)
841 		return ISOLATE_ABORT;
842 
843 	cc->migrate_pfn = low_pfn;
844 
845 	return ISOLATE_SUCCESS;
846 }
847 
848 static int compact_finished(struct zone *zone,
849 			    struct compact_control *cc)
850 {
851 	unsigned long watermark;
852 
853 	if (fatal_signal_pending(current))
854 		return COMPACT_PARTIAL;
855 
856 	/* Compaction run completes if the migrate and free scanner meet */
857 	if (cc->free_pfn <= cc->migrate_pfn) {
858 		/*
859 		 * Mark that the PG_migrate_skip information should be cleared
860 		 * by kswapd when it goes to sleep. kswapd does not set the
861 		 * flag itself as the decision to clear the information should
862 		 * be based directly on an allocation request.
863 		 */
864 		if (!current_is_kswapd())
865 			zone->compact_blockskip_flush = true;
866 
867 		return COMPACT_COMPLETE;
868 	}
869 
870 	/*
871 	 * order == -1 is expected when compacting via
872 	 * /proc/sys/vm/compact_memory
873 	 */
874 	if (cc->order == -1)
875 		return COMPACT_CONTINUE;
876 
877 	/* Compaction run is not finished if the watermark is not met */
878 	watermark = low_wmark_pages(zone);
879 	watermark += (1 << cc->order);
880 
881 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
882 		return COMPACT_CONTINUE;
883 
884 	/* Direct compactor: Is a suitable page free? */
885 	if (cc->page) {
886 		/* Was a suitable page captured? */
887 		if (*cc->page)
888 			return COMPACT_PARTIAL;
889 	} else {
890 		unsigned int order;
891 		for (order = cc->order; order < MAX_ORDER; order++) {
892 			struct free_area *area = &zone->free_area[order];
893 			/* Job done if page is free of the right migratetype */
894 			if (!list_empty(&area->free_list[cc->migratetype]))
895 				return COMPACT_PARTIAL;
896 
897 			/* Job done if allocation would set block type */
898 			if (order >= pageblock_order && area->nr_free)
899 				return COMPACT_PARTIAL;
900 		}
901 	}
902 
903 	return COMPACT_CONTINUE;
904 }
905 
906 /*
907  * compaction_suitable: Is this suitable to run compaction on this zone now?
908  * Returns
909  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
910  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
911  *   COMPACT_CONTINUE - If compaction should run now
912  */
913 unsigned long compaction_suitable(struct zone *zone, int order)
914 {
915 	int fragindex;
916 	unsigned long watermark;
917 
918 	/*
919 	 * order == -1 is expected when compacting via
920 	 * /proc/sys/vm/compact_memory
921 	 */
922 	if (order == -1)
923 		return COMPACT_CONTINUE;
924 
925 	/*
926 	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
927 	 * This is because during migration, copies of pages need to be
928 	 * allocated and for a short time, the footprint is higher
929 	 */
930 	watermark = low_wmark_pages(zone) + (2UL << order);
931 	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
932 		return COMPACT_SKIPPED;
933 
934 	/*
935 	 * fragmentation index determines if allocation failures are due to
936 	 * low memory or external fragmentation
937 	 *
938 	 * index of -1000 implies allocations might succeed depending on
939 	 * watermarks
940 	 * index towards 0 implies failure is due to lack of memory
941 	 * index towards 1000 implies failure is due to fragmentation
942 	 *
943 	 * Only compact if a failure would be due to fragmentation.
944 	 */
945 	fragindex = fragmentation_index(zone, order);
946 	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
947 		return COMPACT_SKIPPED;
948 
949 	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
950 	    0, 0))
951 		return COMPACT_PARTIAL;
952 
953 	return COMPACT_CONTINUE;
954 }
955 
956 static int compact_zone(struct zone *zone, struct compact_control *cc)
957 {
958 	int ret;
959 	unsigned long start_pfn = zone->zone_start_pfn;
960 	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
961 
962 	ret = compaction_suitable(zone, cc->order);
963 	switch (ret) {
964 	case COMPACT_PARTIAL:
965 	case COMPACT_SKIPPED:
966 		/* Compaction is likely to fail */
967 		return ret;
968 	case COMPACT_CONTINUE:
969 		/* Fall through to compaction */
970 		;
971 	}
972 
973 	/*
974 	 * Set up to move all movable pages to the end of the zone. Use cached
975 	 * information on where the scanners should start, but check that it
976 	 * is initialised by ensuring the values are within zone boundaries.
977 	 */
978 	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
979 	cc->free_pfn = zone->compact_cached_free_pfn;
980 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
981 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
982 		zone->compact_cached_free_pfn = cc->free_pfn;
983 	}
984 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
985 		cc->migrate_pfn = start_pfn;
986 		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
987 	}
988 
989 	/*
990 	 * Clear pageblock skip if there were failures recently and compaction
991 	 * is about to be retried after being deferred. kswapd does not do
992 	 * this reset as it'll reset the cached information when going to sleep.
993 	 */
994 	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
995 		__reset_isolation_suitable(zone);
996 
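	/* Drain this CPU's LRU pagevecs so recently added pages are on the LRU lists */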
997 	migrate_prep_local();
998 
999 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
1000 		unsigned long nr_migrate, nr_remaining;
1001 		int err;
1002 
1003 		switch (isolate_migratepages(zone, cc)) {
1004 		case ISOLATE_ABORT:
1005 			ret = COMPACT_PARTIAL;
1006 			putback_movable_pages(&cc->migratepages);
1007 			cc->nr_migratepages = 0;
1008 			goto out;
1009 		case ISOLATE_NONE:
1010 			continue;
1011 		case ISOLATE_SUCCESS:
1012 			;
1013 		}
1014 
1015 		nr_migrate = cc->nr_migratepages;
1016 		err = migrate_pages(&cc->migratepages, compaction_alloc,
1017 				(unsigned long)cc, false,
1018 				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
1019 		update_nr_listpages(cc);
1020 		nr_remaining = cc->nr_migratepages;
1021 
1022 		count_vm_event(COMPACTBLOCKS);
1023 		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
1024 		if (nr_remaining)
1025 			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
1026 		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
1027 						nr_remaining);
1028 
1029 		/* Release isolated pages not migrated */
1030 		if (err) {
1031 			putback_movable_pages(&cc->migratepages);
1032 			cc->nr_migratepages = 0;
1033 			if (err == -ENOMEM) {
1034 				ret = COMPACT_PARTIAL;
1035 				goto out;
1036 			}
1037 		}
1038 
1039 		/* Capture a page now if it is a suitable size */
1040 		compact_capture_page(cc);
1041 	}
1042 
1043 out:
1044 	/* Release free pages and check accounting */
1045 	cc->nr_freepages -= release_freepages(&cc->freepages);
1046 	VM_BUG_ON(cc->nr_freepages != 0);
1047 
1048 	return ret;
1049 }
1050 
1051 static unsigned long compact_zone_order(struct zone *zone,
1052 				 int order, gfp_t gfp_mask,
1053 				 bool sync, bool *contended,
1054 				 struct page **page)
1055 {
1056 	unsigned long ret;
1057 	struct compact_control cc = {
1058 		.nr_freepages = 0,
1059 		.nr_migratepages = 0,
1060 		.order = order,
1061 		.migratetype = allocflags_to_migratetype(gfp_mask),
1062 		.zone = zone,
1063 		.sync = sync,
1064 		.page = page,
1065 	};
1066 	INIT_LIST_HEAD(&cc.freepages);
1067 	INIT_LIST_HEAD(&cc.migratepages);
1068 
1069 	ret = compact_zone(zone, &cc);
1070 
1071 	VM_BUG_ON(!list_empty(&cc.freepages));
1072 	VM_BUG_ON(!list_empty(&cc.migratepages));
1073 
1074 	*contended = cc.contended;
1075 	return ret;
1076 }
1077 
1078 int sysctl_extfrag_threshold = 500;
1079 
1080 /**
1081  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1082  * @zonelist: The zonelist used for the current allocation
1083  * @order: The order of the current allocation
1084  * @gfp_mask: The GFP mask of the current allocation
1085  * @nodemask: The allowed nodes to allocate from
1086  * @sync: Whether migration is synchronous or not
1087  * @contended: Return value that is true if compaction was aborted due to lock contention
1088  * @page: Optionally capture a free page of the requested order during compaction
1089  *
1090  * This is the main entry point for direct page compaction.
1091  */
1092 unsigned long try_to_compact_pages(struct zonelist *zonelist,
1093 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
1094 			bool sync, bool *contended, struct page **page)
1095 {
1096 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1097 	int may_enter_fs = gfp_mask & __GFP_FS;
1098 	int may_perform_io = gfp_mask & __GFP_IO;
1099 	struct zoneref *z;
1100 	struct zone *zone;
1101 	int rc = COMPACT_SKIPPED;
1102 	int alloc_flags = 0;
1103 
1104 	/* Check if the GFP flags allow compaction */
1105 	if (!order || !may_enter_fs || !may_perform_io)
1106 		return rc;
1107 
1108 	count_vm_event(COMPACTSTALL);
1109 
1110 #ifdef CONFIG_CMA
1111 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
1112 		alloc_flags |= ALLOC_CMA;
1113 #endif
1114 	/* Compact each zone in the list */
1115 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
1116 								nodemask) {
1117 		int status;
1118 
1119 		status = compact_zone_order(zone, order, gfp_mask, sync,
1120 						contended, page);
1121 		rc = max(status, rc);
1122 
1123 		/* If a normal allocation would succeed, stop compacting */
1124 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
1125 				      alloc_flags))
1126 			break;
1127 	}
1128 
1129 	return rc;
1130 }
1131 
1132 
1133 /* Compact all zones within a node */
1134 static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
1135 {
1136 	int zoneid;
1137 	struct zone *zone;
1138 
1139 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1140 
1141 		zone = &pgdat->node_zones[zoneid];
1142 		if (!populated_zone(zone))
1143 			continue;
1144 
1145 		cc->nr_freepages = 0;
1146 		cc->nr_migratepages = 0;
1147 		cc->zone = zone;
1148 		INIT_LIST_HEAD(&cc->freepages);
1149 		INIT_LIST_HEAD(&cc->migratepages);
1150 
1151 		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
1152 			compact_zone(zone, cc);
1153 
1154 		if (cc->order > 0) {
1155 			int ok = zone_watermark_ok(zone, cc->order,
1156 						low_wmark_pages(zone), 0, 0);
1157 			if (ok && cc->order >= zone->compact_order_failed)
1158 				zone->compact_order_failed = cc->order + 1;
1159 			/* Currently async compaction is never deferred. */
1160 			else if (!ok && cc->sync)
1161 				defer_compaction(zone, cc->order);
1162 		}
1163 
1164 		VM_BUG_ON(!list_empty(&cc->freepages));
1165 		VM_BUG_ON(!list_empty(&cc->migratepages));
1166 	}
1167 
1168 	return 0;
1169 }
1170 
1171 int compact_pgdat(pg_data_t *pgdat, int order)
1172 {
1173 	struct compact_control cc = {
1174 		.order = order,
1175 		.sync = false,
1176 		.page = NULL,
1177 	};
1178 
1179 	return __compact_pgdat(pgdat, &cc);
1180 }
1181 
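/* Fully compact one node; used by the compact_memory sysctl and sysfs trigger */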
1182 static int compact_node(int nid)
1183 {
1184 	struct compact_control cc = {
1185 		.order = -1,
1186 		.sync = true,
1187 		.page = NULL,
1188 	};
1189 
1190 	return __compact_pgdat(NODE_DATA(nid), &cc);
1191 }
1192 
1193 /* Compact all nodes in the system */
1194 static int compact_nodes(void)
1195 {
1196 	int nid;
1197 
1198 	/* Flush pending updates to the LRU lists */
1199 	lru_add_drain_all();
1200 
1201 	for_each_online_node(nid)
1202 		compact_node(nid);
1203 
1204 	return COMPACT_COMPLETE;
1205 }
1206 
1207 /* The written value is actually unused, all memory is compacted */
1208 int sysctl_compact_memory;
1209 
1210 /* This is the entry point for compacting all nodes via /proc/sys/vm */
1211 int sysctl_compaction_handler(struct ctl_table *table, int write,
1212 			void __user *buffer, size_t *length, loff_t *ppos)
1213 {
1214 	if (write)
1215 		return compact_nodes();
1216 
1217 	return 0;
1218 }
1219 
1220 int sysctl_extfrag_handler(struct ctl_table *table, int write,
1221 			void __user *buffer, size_t *length, loff_t *ppos)
1222 {
1223 	proc_dointvec_minmax(table, write, buffer, length, ppos);
1224 
1225 	return 0;
1226 }
1227 
1228 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
1229 ssize_t sysfs_compact_node(struct device *dev,
1230 			struct device_attribute *attr,
1231 			const char *buf, size_t count)
1232 {
1233 	int nid = dev->id;
1234 
1235 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1236 		/* Flush pending updates to the LRU lists */
1237 		lru_add_drain_all();
1238 
1239 		compact_node(nid);
1240 	}
1241 
1242 	return count;
1243 }
1244 static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1245 
1246 int compaction_register_node(struct node *node)
1247 {
1248 	return device_create_file(&node->dev, &dev_attr_compact);
1249 }
1250 
1251 void compaction_unregister_node(struct node *node)
1252 {
1253 	return device_remove_file(&node->dev, &dev_attr_compact);
1254 }
1255 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1256 
1257 #endif /* CONFIG_COMPACTION */
1258