xref: /linux/mm/compaction.c (revision 100c85421b52e41269ada88f7d71a6b8a06c7a11)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/mm/compaction.c
4  *
5  * Memory compaction for the reduction of external fragmentation. Note that
6  * this heavily depends upon page migration to do all the real heavy
7  * lifting
8  *
9  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
10  */
11 #include <linux/cpu.h>
12 #include <linux/swap.h>
13 #include <linux/migrate.h>
14 #include <linux/compaction.h>
15 #include <linux/mm_inline.h>
16 #include <linux/sched/signal.h>
17 #include <linux/backing-dev.h>
18 #include <linux/sysctl.h>
19 #include <linux/sysfs.h>
20 #include <linux/page-isolation.h>
21 #include <linux/kasan.h>
22 #include <linux/kthread.h>
23 #include <linux/freezer.h>
24 #include <linux/page_owner.h>
25 #include <linux/psi.h>
26 #include "internal.h"
27 
28 #ifdef CONFIG_COMPACTION
29 /*
30  * Fragmentation score check interval for proactive compaction purposes.
31  */
32 #define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)
33 
34 static inline void count_compact_event(enum vm_event_item item)
35 {
36 	count_vm_event(item);
37 }
38 
39 static inline void count_compact_events(enum vm_event_item item, long delta)
40 {
41 	count_vm_events(item, delta);
42 }
43 
44 /*
45  * order == -1 is expected when compacting proactively via
46  * 1. /proc/sys/vm/compact_memory
47  * 2. /sys/devices/system/node/nodex/compact
48  * 3. /proc/sys/vm/compaction_proactiveness
49  */
50 static inline bool is_via_compact_memory(int order)
51 {
52 	return order == -1;
53 }
54 
55 #else
56 #define count_compact_event(item) do { } while (0)
57 #define count_compact_events(item, delta) do { } while (0)
58 static inline bool is_via_compact_memory(int order) { return false; }
59 #endif
60 
61 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
62 
63 #define CREATE_TRACE_POINTS
64 #include <trace/events/compaction.h>
65 
66 #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
67 #define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
68 
69 /*
70  * Page order with-respect-to which proactive compaction
71  * calculates external fragmentation, which is used as
72  * the "fragmentation score" of a node/zone.
73  */
74 #if defined CONFIG_TRANSPARENT_HUGEPAGE
75 #define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
76 #elif defined CONFIG_HUGETLBFS
77 #define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
78 #else
79 #define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
80 #endif
81 
82 static void split_map_pages(struct list_head *freepages)
83 {
84 	unsigned int i, order;
85 	struct page *page, *next;
86 	LIST_HEAD(tmp_list);
87 
88 	for (order = 0; order < NR_PAGE_ORDERS; order++) {
89 		list_for_each_entry_safe(page, next, &freepages[order], lru) {
90 			unsigned int nr_pages;
91 
92 			list_del(&page->lru);
93 
94 			nr_pages = 1 << order;
95 
96 			post_alloc_hook(page, order, __GFP_MOVABLE);
97 			if (order)
98 				split_page(page, order);
99 
100 			for (i = 0; i < nr_pages; i++) {
101 				list_add(&page->lru, &tmp_list);
102 				page++;
103 			}
104 		}
105 		list_splice_init(&tmp_list, &freepages[0]);
106 	}
107 }
108 
109 static unsigned long release_free_list(struct list_head *freepages)
110 {
111 	int order;
112 	unsigned long high_pfn = 0;
113 
114 	for (order = 0; order < NR_PAGE_ORDERS; order++) {
115 		struct page *page, *next;
116 
117 		list_for_each_entry_safe(page, next, &freepages[order], lru) {
118 			unsigned long pfn = page_to_pfn(page);
119 
120 			list_del(&page->lru);
121 			/*
122 			 * Convert free pages into post allocation pages, so
123 			 * that we can free them via __free_page.
124 			 */
125 			post_alloc_hook(page, order, __GFP_MOVABLE);
126 			__free_pages(page, order);
127 			if (pfn > high_pfn)
128 				high_pfn = pfn;
129 		}
130 	}
131 	return high_pfn;
132 }
133 
134 #ifdef CONFIG_COMPACTION
135 bool PageMovable(struct page *page)
136 {
137 	const struct movable_operations *mops;
138 
139 	VM_BUG_ON_PAGE(!PageLocked(page), page);
140 	if (!__PageMovable(page))
141 		return false;
142 
143 	mops = page_movable_ops(page);
144 	if (mops)
145 		return true;
146 
147 	return false;
148 }
149 
150 void __SetPageMovable(struct page *page, const struct movable_operations *mops)
151 {
152 	VM_BUG_ON_PAGE(!PageLocked(page), page);
153 	VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
154 	page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
155 }
156 EXPORT_SYMBOL(__SetPageMovable);
157 
158 void __ClearPageMovable(struct page *page)
159 {
160 	VM_BUG_ON_PAGE(!PageMovable(page), page);
161 	/*
162 	 * This page still has the type of a movable page, but it's
163 	 * actually not movable any more.
164 	 */
165 	page->mapping = (void *)PAGE_MAPPING_MOVABLE;
166 }
167 EXPORT_SYMBOL(__ClearPageMovable);
168 
169 /* Do not skip compaction more than 64 times */
170 #define COMPACT_MAX_DEFER_SHIFT 6
171 
172 /*
173  * Compaction is deferred when compaction fails to result in a page
174  * allocation success. 1 << compact_defer_shift, compactions are skipped up
175  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
176  */
177 static void defer_compaction(struct zone *zone, int order)
178 {
179 	zone->compact_considered = 0;
180 	zone->compact_defer_shift++;
181 
182 	if (order < zone->compact_order_failed)
183 		zone->compact_order_failed = order;
184 
185 	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
186 		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
187 
188 	trace_mm_compaction_defer_compaction(zone, order);
189 }
190 
191 /* Returns true if compaction should be skipped this time */
192 static bool compaction_deferred(struct zone *zone, int order)
193 {
194 	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
195 
196 	if (order < zone->compact_order_failed)
197 		return false;
198 
199 	/* Avoid possible overflow */
200 	if (++zone->compact_considered >= defer_limit) {
201 		zone->compact_considered = defer_limit;
202 		return false;
203 	}
204 
205 	trace_mm_compaction_deferred(zone, order);
206 
207 	return true;
208 }
209 
210 /*
211  * Update defer tracking counters after successful compaction of given order,
212  * which means an allocation either succeeded (alloc_success == true) or is
213  * expected to succeed.
214  */
215 void compaction_defer_reset(struct zone *zone, int order,
216 		bool alloc_success)
217 {
218 	if (alloc_success) {
219 		zone->compact_considered = 0;
220 		zone->compact_defer_shift = 0;
221 	}
222 	if (order >= zone->compact_order_failed)
223 		zone->compact_order_failed = order + 1;
224 
225 	trace_mm_compaction_defer_reset(zone, order);
226 }
227 
228 /* Returns true if restarting compaction after many failures */
229 static bool compaction_restarting(struct zone *zone, int order)
230 {
231 	if (order < zone->compact_order_failed)
232 		return false;
233 
234 	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
235 		zone->compact_considered >= 1UL << zone->compact_defer_shift;
236 }
237 
238 /* Returns true if the pageblock should be scanned for pages to isolate. */
239 static inline bool isolation_suitable(struct compact_control *cc,
240 					struct page *page)
241 {
242 	if (cc->ignore_skip_hint)
243 		return true;
244 
245 	return !get_pageblock_skip(page);
246 }
247 
248 static void reset_cached_positions(struct zone *zone)
249 {
250 	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
251 	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
252 	zone->compact_cached_free_pfn =
253 				pageblock_start_pfn(zone_end_pfn(zone) - 1);
254 }
255 
256 #ifdef CONFIG_SPARSEMEM
257 /*
258  * If the PFN falls into an offline section, return the start PFN of the
259  * next online section. If the PFN falls into an online section or if
260  * there is no next online section, return 0.
261  */
262 static unsigned long skip_offline_sections(unsigned long start_pfn)
263 {
264 	unsigned long start_nr = pfn_to_section_nr(start_pfn);
265 
266 	if (online_section_nr(start_nr))
267 		return 0;
268 
269 	while (++start_nr <= __highest_present_section_nr) {
270 		if (online_section_nr(start_nr))
271 			return section_nr_to_pfn(start_nr);
272 	}
273 
274 	return 0;
275 }
276 
277 /*
278  * If the PFN falls into an offline section, return the end PFN of the
279  * next online section in reverse. If the PFN falls into an online section
280  * or if there is no next online section in reverse, return 0.
281  */
282 static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
283 {
284 	unsigned long start_nr = pfn_to_section_nr(start_pfn);
285 
286 	if (!start_nr || online_section_nr(start_nr))
287 		return 0;
288 
289 	while (start_nr-- > 0) {
290 		if (online_section_nr(start_nr))
291 			return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION;
292 	}
293 
294 	return 0;
295 }
296 #else
297 static unsigned long skip_offline_sections(unsigned long start_pfn)
298 {
299 	return 0;
300 }
301 
302 static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
303 {
304 	return 0;
305 }
306 #endif
307 
308 /*
309  * Compound pages of >= pageblock_order should consistently be skipped until
310  * released. It is always pointless to compact pages of such order (if they are
311  * migratable), and the pageblocks they occupy cannot contain any free pages.
312  */
313 static bool pageblock_skip_persistent(struct page *page)
314 {
315 	if (!PageCompound(page))
316 		return false;
317 
318 	page = compound_head(page);
319 
320 	if (compound_order(page) >= pageblock_order)
321 		return true;
322 
323 	return false;
324 }
325 
326 static bool
327 __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
328 							bool check_target)
329 {
330 	struct page *page = pfn_to_online_page(pfn);
331 	struct page *block_page;
332 	struct page *end_page;
333 	unsigned long block_pfn;
334 
335 	if (!page)
336 		return false;
337 	if (zone != page_zone(page))
338 		return false;
339 	if (pageblock_skip_persistent(page))
340 		return false;
341 
342 	/*
343 	 * If skip is already cleared do no further checking once the
344 	 * restart points have been set.
345 	 */
346 	if (check_source && check_target && !get_pageblock_skip(page))
347 		return true;
348 
349 	/*
350 	 * If clearing skip for the target scanner, do not select a
351 	 * non-movable pageblock as the starting point.
352 	 */
353 	if (!check_source && check_target &&
354 	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
355 		return false;
356 
357 	/* Ensure the start of the pageblock or zone is online and valid */
358 	block_pfn = pageblock_start_pfn(pfn);
359 	block_pfn = max(block_pfn, zone->zone_start_pfn);
360 	block_page = pfn_to_online_page(block_pfn);
361 	if (block_page) {
362 		page = block_page;
363 		pfn = block_pfn;
364 	}
365 
366 	/* Ensure the end of the pageblock or zone is online and valid */
367 	block_pfn = pageblock_end_pfn(pfn) - 1;
368 	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
369 	end_page = pfn_to_online_page(block_pfn);
370 	if (!end_page)
371 		return false;
372 
373 	/*
374 	 * Only clear the hint if a sample indicates there is either a
375 	 * free page or an LRU page in the block. One or other condition
376 	 * is necessary for the block to be a migration source/target.
377 	 */
378 	do {
379 		if (check_source && PageLRU(page)) {
380 			clear_pageblock_skip(page);
381 			return true;
382 		}
383 
384 		if (check_target && PageBuddy(page)) {
385 			clear_pageblock_skip(page);
386 			return true;
387 		}
388 
389 		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
390 	} while (page <= end_page);
391 
392 	return false;
393 }
394 
395 /*
396  * This function is called to clear all cached information on pageblocks that
397  * should be skipped for page isolation when the migrate and free page scanner
398  * meet.
399  */
400 static void __reset_isolation_suitable(struct zone *zone)
401 {
402 	unsigned long migrate_pfn = zone->zone_start_pfn;
403 	unsigned long free_pfn = zone_end_pfn(zone) - 1;
404 	unsigned long reset_migrate = free_pfn;
405 	unsigned long reset_free = migrate_pfn;
406 	bool source_set = false;
407 	bool free_set = false;
408 
409 	/* Only flush if a full compaction finished recently */
410 	if (!zone->compact_blockskip_flush)
411 		return;
412 
413 	zone->compact_blockskip_flush = false;
414 
415 	/*
416 	 * Walk the zone and update pageblock skip information. Source looks
417 	 * for PageLRU while target looks for PageBuddy. When the scanner
418 	 * is found, both PageBuddy and PageLRU are checked as the pageblock
419 	 * is suitable as both source and target.
420 	 */
421 	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
422 					free_pfn -= pageblock_nr_pages) {
423 		cond_resched();
424 
425 		/* Update the migrate PFN */
426 		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
427 		    migrate_pfn < reset_migrate) {
428 			source_set = true;
429 			reset_migrate = migrate_pfn;
430 			zone->compact_init_migrate_pfn = reset_migrate;
431 			zone->compact_cached_migrate_pfn[0] = reset_migrate;
432 			zone->compact_cached_migrate_pfn[1] = reset_migrate;
433 		}
434 
435 		/* Update the free PFN */
436 		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
437 		    free_pfn > reset_free) {
438 			free_set = true;
439 			reset_free = free_pfn;
440 			zone->compact_init_free_pfn = reset_free;
441 			zone->compact_cached_free_pfn = reset_free;
442 		}
443 	}
444 
445 	/* Leave no distance if no suitable block was reset */
446 	if (reset_migrate >= reset_free) {
447 		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
448 		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
449 		zone->compact_cached_free_pfn = free_pfn;
450 	}
451 }
452 
453 void reset_isolation_suitable(pg_data_t *pgdat)
454 {
455 	int zoneid;
456 
457 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
458 		struct zone *zone = &pgdat->node_zones[zoneid];
459 		if (!populated_zone(zone))
460 			continue;
461 
462 		__reset_isolation_suitable(zone);
463 	}
464 }
465 
466 /*
467  * Sets the pageblock skip bit if it was clear. Note that this is a hint as
468  * locks are not required for read/writers. Returns true if it was already set.
469  */
470 static bool test_and_set_skip(struct compact_control *cc, struct page *page)
471 {
472 	bool skip;
473 
474 	/* Do not update if skip hint is being ignored */
475 	if (cc->ignore_skip_hint)
476 		return false;
477 
478 	skip = get_pageblock_skip(page);
479 	if (!skip && !cc->no_set_skip_hint)
480 		set_pageblock_skip(page);
481 
482 	return skip;
483 }
484 
485 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
486 {
487 	struct zone *zone = cc->zone;
488 
489 	/* Set for isolation rather than compaction */
490 	if (cc->no_set_skip_hint)
491 		return;
492 
493 	pfn = pageblock_end_pfn(pfn);
494 
495 	/* Update where async and sync compaction should restart */
496 	if (pfn > zone->compact_cached_migrate_pfn[0])
497 		zone->compact_cached_migrate_pfn[0] = pfn;
498 	if (cc->mode != MIGRATE_ASYNC &&
499 	    pfn > zone->compact_cached_migrate_pfn[1])
500 		zone->compact_cached_migrate_pfn[1] = pfn;
501 }
502 
503 /*
504  * If no pages were isolated then mark this pageblock to be skipped in the
505  * future. The information is later cleared by __reset_isolation_suitable().
506  */
507 static void update_pageblock_skip(struct compact_control *cc,
508 			struct page *page, unsigned long pfn)
509 {
510 	struct zone *zone = cc->zone;
511 
512 	if (cc->no_set_skip_hint)
513 		return;
514 
515 	set_pageblock_skip(page);
516 
517 	if (pfn < zone->compact_cached_free_pfn)
518 		zone->compact_cached_free_pfn = pfn;
519 }
520 #else
521 static inline bool isolation_suitable(struct compact_control *cc,
522 					struct page *page)
523 {
524 	return true;
525 }
526 
527 static inline bool pageblock_skip_persistent(struct page *page)
528 {
529 	return false;
530 }
531 
532 static inline void update_pageblock_skip(struct compact_control *cc,
533 			struct page *page, unsigned long pfn)
534 {
535 }
536 
537 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
538 {
539 }
540 
541 static bool test_and_set_skip(struct compact_control *cc, struct page *page)
542 {
543 	return false;
544 }
545 #endif /* CONFIG_COMPACTION */
546 
547 /*
548  * Compaction requires the taking of some coarse locks that are potentially
549  * very heavily contended. For async compaction, trylock and record if the
550  * lock is contended. The lock will still be acquired but compaction will
551  * abort when the current block is finished regardless of success rate.
552  * Sync compaction acquires the lock.
553  *
554  * Always returns true which makes it easier to track lock state in callers.
555  */
556 static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
557 						struct compact_control *cc)
558 	__acquires(lock)
559 {
560 	/* Track if the lock is contended in async mode */
561 	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
562 		if (spin_trylock_irqsave(lock, *flags))
563 			return true;
564 
565 		cc->contended = true;
566 	}
567 
568 	spin_lock_irqsave(lock, *flags);
569 	return true;
570 }
571 
572 /*
573  * Compaction requires the taking of some coarse locks that are potentially
574  * very heavily contended. The lock should be periodically unlocked to avoid
575  * having disabled IRQs for a long time, even when there is nobody waiting on
576  * the lock. It might also be that allowing the IRQs will result in
577  * need_resched() becoming true. If scheduling is needed, compaction schedules.
578  * Either compaction type will also abort if a fatal signal is pending.
579  * In either case if the lock was locked, it is dropped and not regained.
580  *
581  * Returns true if compaction should abort due to fatal signal pending.
582  * Returns false when compaction can continue.
583  */
584 static bool compact_unlock_should_abort(spinlock_t *lock,
585 		unsigned long flags, bool *locked, struct compact_control *cc)
586 {
587 	if (*locked) {
588 		spin_unlock_irqrestore(lock, flags);
589 		*locked = false;
590 	}
591 
592 	if (fatal_signal_pending(current)) {
593 		cc->contended = true;
594 		return true;
595 	}
596 
597 	cond_resched();
598 
599 	return false;
600 }
601 
602 /*
603  * Isolate free pages onto a private freelist. If @strict is true, will abort
604  * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
605  * (even though it may still end up isolating some pages).
606  */
607 static unsigned long isolate_freepages_block(struct compact_control *cc,
608 				unsigned long *start_pfn,
609 				unsigned long end_pfn,
610 				struct list_head *freelist,
611 				unsigned int stride,
612 				bool strict)
613 {
614 	int nr_scanned = 0, total_isolated = 0;
615 	struct page *page;
616 	unsigned long flags = 0;
617 	bool locked = false;
618 	unsigned long blockpfn = *start_pfn;
619 	unsigned int order;
620 
621 	/* Strict mode is for isolation, speed is secondary */
622 	if (strict)
623 		stride = 1;
624 
625 	page = pfn_to_page(blockpfn);
626 
627 	/* Isolate free pages. */
628 	for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
629 		int isolated;
630 
631 		/*
632 		 * Periodically drop the lock (if held) regardless of its
633 		 * contention, to give chance to IRQs. Abort if fatal signal
634 		 * pending.
635 		 */
636 		if (!(blockpfn % COMPACT_CLUSTER_MAX)
637 		    && compact_unlock_should_abort(&cc->zone->lock, flags,
638 								&locked, cc))
639 			break;
640 
641 		nr_scanned++;
642 
643 		/*
644 		 * For compound pages such as THP and hugetlbfs, we can save
645 		 * potentially a lot of iterations if we skip them at once.
646 		 * The check is racy, but we can consider only valid values
647 		 * and the only danger is skipping too much.
648 		 */
649 		if (PageCompound(page)) {
650 			const unsigned int order = compound_order(page);
651 
652 			if (blockpfn + (1UL << order) <= end_pfn) {
653 				blockpfn += (1UL << order) - 1;
654 				page += (1UL << order) - 1;
655 				nr_scanned += (1UL << order) - 1;
656 			}
657 
658 			goto isolate_fail;
659 		}
660 
661 		if (!PageBuddy(page))
662 			goto isolate_fail;
663 
664 		/* If we already hold the lock, we can skip some rechecking. */
665 		if (!locked) {
666 			locked = compact_lock_irqsave(&cc->zone->lock,
667 								&flags, cc);
668 
669 			/* Recheck this is a buddy page under lock */
670 			if (!PageBuddy(page))
671 				goto isolate_fail;
672 		}
673 
674 		/* Found a free page, will break it into order-0 pages */
675 		order = buddy_order(page);
676 		isolated = __isolate_free_page(page, order);
677 		if (!isolated)
678 			break;
679 		set_page_private(page, order);
680 
681 		nr_scanned += isolated - 1;
682 		total_isolated += isolated;
683 		cc->nr_freepages += isolated;
684 		list_add_tail(&page->lru, &freelist[order]);
685 
686 		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
687 			blockpfn += isolated;
688 			break;
689 		}
690 		/* Advance to the end of split page */
691 		blockpfn += isolated - 1;
692 		page += isolated - 1;
693 		continue;
694 
695 isolate_fail:
696 		if (strict)
697 			break;
698 
699 	}
700 
701 	if (locked)
702 		spin_unlock_irqrestore(&cc->zone->lock, flags);
703 
704 	/*
705 	 * Be careful to not go outside of the pageblock.
706 	 */
707 	if (unlikely(blockpfn > end_pfn))
708 		blockpfn = end_pfn;
709 
710 	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
711 					nr_scanned, total_isolated);
712 
713 	/* Record how far we have got within the block */
714 	*start_pfn = blockpfn;
715 
716 	/*
717 	 * If strict isolation is requested by CMA then check that all the
718 	 * pages requested were isolated. If there were any failures, 0 is
719 	 * returned and CMA will fail.
720 	 */
721 	if (strict && blockpfn < end_pfn)
722 		total_isolated = 0;
723 
724 	cc->total_free_scanned += nr_scanned;
725 	if (total_isolated)
726 		count_compact_events(COMPACTISOLATED, total_isolated);
727 	return total_isolated;
728 }
729 
730 /**
731  * isolate_freepages_range() - isolate free pages.
732  * @cc:        Compaction control structure.
733  * @start_pfn: The first PFN to start isolating.
734  * @end_pfn:   The one-past-last PFN.
735  *
736  * Non-free pages, invalid PFNs, or zone boundaries within the
737  * [start_pfn, end_pfn) range are considered errors, cause function to
738  * undo its actions and return zero.
739  *
740  * Otherwise, function returns one-past-the-last PFN of isolated page
741  * (which may be greater then end_pfn if end fell in a middle of
742  * a free page).
743  */
744 unsigned long
745 isolate_freepages_range(struct compact_control *cc,
746 			unsigned long start_pfn, unsigned long end_pfn)
747 {
748 	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
749 	int order;
750 	struct list_head tmp_freepages[NR_PAGE_ORDERS];
751 
752 	for (order = 0; order < NR_PAGE_ORDERS; order++)
753 		INIT_LIST_HEAD(&tmp_freepages[order]);
754 
755 	pfn = start_pfn;
756 	block_start_pfn = pageblock_start_pfn(pfn);
757 	if (block_start_pfn < cc->zone->zone_start_pfn)
758 		block_start_pfn = cc->zone->zone_start_pfn;
759 	block_end_pfn = pageblock_end_pfn(pfn);
760 
761 	for (; pfn < end_pfn; pfn += isolated,
762 				block_start_pfn = block_end_pfn,
763 				block_end_pfn += pageblock_nr_pages) {
764 		/* Protect pfn from changing by isolate_freepages_block */
765 		unsigned long isolate_start_pfn = pfn;
766 
767 		/*
768 		 * pfn could pass the block_end_pfn if isolated freepage
769 		 * is more than pageblock order. In this case, we adjust
770 		 * scanning range to right one.
771 		 */
772 		if (pfn >= block_end_pfn) {
773 			block_start_pfn = pageblock_start_pfn(pfn);
774 			block_end_pfn = pageblock_end_pfn(pfn);
775 		}
776 
777 		block_end_pfn = min(block_end_pfn, end_pfn);
778 
779 		if (!pageblock_pfn_to_page(block_start_pfn,
780 					block_end_pfn, cc->zone))
781 			break;
782 
783 		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
784 					block_end_pfn, tmp_freepages, 0, true);
785 
786 		/*
787 		 * In strict mode, isolate_freepages_block() returns 0 if
788 		 * there are any holes in the block (ie. invalid PFNs or
789 		 * non-free pages).
790 		 */
791 		if (!isolated)
792 			break;
793 
794 		/*
795 		 * If we managed to isolate pages, it is always (1 << n) *
796 		 * pageblock_nr_pages for some non-negative n.  (Max order
797 		 * page may span two pageblocks).
798 		 */
799 	}
800 
801 	if (pfn < end_pfn) {
802 		/* Loop terminated early, cleanup. */
803 		release_free_list(tmp_freepages);
804 		return 0;
805 	}
806 
807 	/* __isolate_free_page() does not map the pages */
808 	split_map_pages(tmp_freepages);
809 
810 	/* We don't use freelists for anything. */
811 	return pfn;
812 }
813 
814 /* Similar to reclaim, but different enough that they don't share logic */
815 static bool too_many_isolated(struct compact_control *cc)
816 {
817 	pg_data_t *pgdat = cc->zone->zone_pgdat;
818 	bool too_many;
819 
820 	unsigned long active, inactive, isolated;
821 
822 	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
823 			node_page_state(pgdat, NR_INACTIVE_ANON);
824 	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
825 			node_page_state(pgdat, NR_ACTIVE_ANON);
826 	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
827 			node_page_state(pgdat, NR_ISOLATED_ANON);
828 
829 	/*
830 	 * Allow GFP_NOFS to isolate past the limit set for regular
831 	 * compaction runs. This prevents an ABBA deadlock when other
832 	 * compactors have already isolated to the limit, but are
833 	 * blocked on filesystem locks held by the GFP_NOFS thread.
834 	 */
835 	if (cc->gfp_mask & __GFP_FS) {
836 		inactive >>= 3;
837 		active >>= 3;
838 	}
839 
840 	too_many = isolated > (inactive + active) / 2;
841 	if (!too_many)
842 		wake_throttle_isolated(pgdat);
843 
844 	return too_many;
845 }
846 
847 /**
848  * skip_isolation_on_order() - determine when to skip folio isolation based on
849  *			       folio order and compaction target order
850  * @order:		to-be-isolated folio order
851  * @target_order:	compaction target order
852  *
853  * This avoids unnecessary folio isolations during compaction.
854  */
855 static bool skip_isolation_on_order(int order, int target_order)
856 {
857 	/*
858 	 * Unless we are performing global compaction (i.e.,
859 	 * is_via_compact_memory), skip any folios that are larger than the
860 	 * target order: we wouldn't be here if we'd have a free folio with
861 	 * the desired target_order, so migrating this folio would likely fail
862 	 * later.
863 	 */
864 	if (!is_via_compact_memory(target_order) && order >= target_order)
865 		return true;
866 	/*
867 	 * We limit memory compaction to pageblocks and won't try
868 	 * creating free blocks of memory that are larger than that.
869 	 */
870 	return order >= pageblock_order;
871 }
872 
873 /**
874  * isolate_migratepages_block() - isolate all migrate-able pages within
875  *				  a single pageblock
876  * @cc:		Compaction control structure.
877  * @low_pfn:	The first PFN to isolate
878  * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
879  * @mode:	Isolation mode to be used.
880  *
881  * Isolate all pages that can be migrated from the range specified by
882  * [low_pfn, end_pfn). The range is expected to be within same pageblock.
883  * Returns errno, like -EAGAIN or -EINTR in case e.g signal pending or congestion,
884  * -ENOMEM in case we could not allocate a page, or 0.
885  * cc->migrate_pfn will contain the next pfn to scan.
886  *
887  * The pages are isolated on cc->migratepages list (not required to be empty),
888  * and cc->nr_migratepages is updated accordingly.
889  */
890 static int
891 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
892 			unsigned long end_pfn, isolate_mode_t mode)
893 {
894 	pg_data_t *pgdat = cc->zone->zone_pgdat;
895 	unsigned long nr_scanned = 0, nr_isolated = 0;
896 	struct lruvec *lruvec;
897 	unsigned long flags = 0;
898 	struct lruvec *locked = NULL;
899 	struct folio *folio = NULL;
900 	struct page *page = NULL, *valid_page = NULL;
901 	struct address_space *mapping;
902 	unsigned long start_pfn = low_pfn;
903 	bool skip_on_failure = false;
904 	unsigned long next_skip_pfn = 0;
905 	bool skip_updated = false;
906 	int ret = 0;
907 
908 	cc->migrate_pfn = low_pfn;
909 
910 	/*
911 	 * Ensure that there are not too many pages isolated from the LRU
912 	 * list by either parallel reclaimers or compaction. If there are,
913 	 * delay for some time until fewer pages are isolated
914 	 */
915 	while (unlikely(too_many_isolated(cc))) {
916 		/* stop isolation if there are still pages not migrated */
917 		if (cc->nr_migratepages)
918 			return -EAGAIN;
919 
920 		/* async migration should just abort */
921 		if (cc->mode == MIGRATE_ASYNC)
922 			return -EAGAIN;
923 
924 		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
925 
926 		if (fatal_signal_pending(current))
927 			return -EINTR;
928 	}
929 
930 	cond_resched();
931 
932 	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
933 		skip_on_failure = true;
934 		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
935 	}
936 
937 	/* Time to isolate some pages for migration */
938 	for (; low_pfn < end_pfn; low_pfn++) {
939 		bool is_dirty, is_unevictable;
940 
941 		if (skip_on_failure && low_pfn >= next_skip_pfn) {
942 			/*
943 			 * We have isolated all migration candidates in the
944 			 * previous order-aligned block, and did not skip it due
945 			 * to failure. We should migrate the pages now and
946 			 * hopefully succeed compaction.
947 			 */
948 			if (nr_isolated)
949 				break;
950 
951 			/*
952 			 * We failed to isolate in the previous order-aligned
953 			 * block. Set the new boundary to the end of the
954 			 * current block. Note we can't simply increase
955 			 * next_skip_pfn by 1 << order, as low_pfn might have
956 			 * been incremented by a higher number due to skipping
957 			 * a compound or a high-order buddy page in the
958 			 * previous loop iteration.
959 			 */
960 			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
961 		}
962 
963 		/*
964 		 * Periodically drop the lock (if held) regardless of its
965 		 * contention, to give chance to IRQs. Abort completely if
966 		 * a fatal signal is pending.
967 		 */
968 		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
969 			if (locked) {
970 				unlock_page_lruvec_irqrestore(locked, flags);
971 				locked = NULL;
972 			}
973 
974 			if (fatal_signal_pending(current)) {
975 				cc->contended = true;
976 				ret = -EINTR;
977 
978 				goto fatal_pending;
979 			}
980 
981 			cond_resched();
982 		}
983 
984 		nr_scanned++;
985 
986 		page = pfn_to_page(low_pfn);
987 
988 		/*
989 		 * Check if the pageblock has already been marked skipped.
990 		 * Only the first PFN is checked as the caller isolates
991 		 * COMPACT_CLUSTER_MAX at a time so the second call must
992 		 * not falsely conclude that the block should be skipped.
993 		 */
994 		if (!valid_page && (pageblock_aligned(low_pfn) ||
995 				    low_pfn == cc->zone->zone_start_pfn)) {
996 			if (!isolation_suitable(cc, page)) {
997 				low_pfn = end_pfn;
998 				folio = NULL;
999 				goto isolate_abort;
1000 			}
1001 			valid_page = page;
1002 		}
1003 
1004 		if (PageHuge(page)) {
1005 			/*
1006 			 * skip hugetlbfs if we are not compacting for pages
1007 			 * bigger than its order. THPs and other compound pages
1008 			 * are handled below.
1009 			 */
1010 			if (!cc->alloc_contig) {
1011 				const unsigned int order = compound_order(page);
1012 
1013 				if (order <= MAX_PAGE_ORDER) {
1014 					low_pfn += (1UL << order) - 1;
1015 					nr_scanned += (1UL << order) - 1;
1016 				}
1017 				goto isolate_fail;
1018 			}
1019 			/* for alloc_contig case */
1020 			if (locked) {
1021 				unlock_page_lruvec_irqrestore(locked, flags);
1022 				locked = NULL;
1023 			}
1024 
1025 			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
1026 
1027 			/*
1028 			 * Fail isolation in case isolate_or_dissolve_huge_page()
1029 			 * reports an error. In case of -ENOMEM, abort right away.
1030 			 */
1031 			if (ret < 0) {
1032 				 /* Do not report -EBUSY down the chain */
1033 				if (ret == -EBUSY)
1034 					ret = 0;
1035 				low_pfn += compound_nr(page) - 1;
1036 				nr_scanned += compound_nr(page) - 1;
1037 				goto isolate_fail;
1038 			}
1039 
1040 			if (PageHuge(page)) {
1041 				/*
1042 				 * Hugepage was successfully isolated and placed
1043 				 * on the cc->migratepages list.
1044 				 */
1045 				folio = page_folio(page);
1046 				low_pfn += folio_nr_pages(folio) - 1;
1047 				goto isolate_success_no_list;
1048 			}
1049 
1050 			/*
1051 			 * Ok, the hugepage was dissolved. Now these pages are
1052 			 * Buddy and cannot be re-allocated because they are
1053 			 * isolated. Fall-through as the check below handles
1054 			 * Buddy pages.
1055 			 */
1056 		}
1057 
1058 		/*
1059 		 * Skip if free. We read page order here without zone lock
1060 		 * which is generally unsafe, but the race window is small and
1061 		 * the worst thing that can happen is that we skip some
1062 		 * potential isolation targets.
1063 		 */
1064 		if (PageBuddy(page)) {
1065 			unsigned long freepage_order = buddy_order_unsafe(page);
1066 
1067 			/*
1068 			 * Without lock, we cannot be sure that what we got is
1069 			 * a valid page order. Consider only values in the
1070 			 * valid order range to prevent low_pfn overflow.
1071 			 */
1072 			if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) {
1073 				low_pfn += (1UL << freepage_order) - 1;
1074 				nr_scanned += (1UL << freepage_order) - 1;
1075 			}
1076 			continue;
1077 		}
1078 
1079 		/*
1080 		 * Regardless of being on LRU, compound pages such as THP
1081 		 * (hugetlbfs is handled above) are not to be compacted unless
1082 		 * we are attempting an allocation larger than the compound
1083 		 * page size. We can potentially save a lot of iterations if we
1084 		 * skip them at once. The check is racy, but we can consider
1085 		 * only valid values and the only danger is skipping too much.
1086 		 */
1087 		if (PageCompound(page) && !cc->alloc_contig) {
1088 			const unsigned int order = compound_order(page);
1089 
1090 			/* Skip based on page order and compaction target order. */
1091 			if (skip_isolation_on_order(order, cc->order)) {
1092 				if (order <= MAX_PAGE_ORDER) {
1093 					low_pfn += (1UL << order) - 1;
1094 					nr_scanned += (1UL << order) - 1;
1095 				}
1096 				goto isolate_fail;
1097 			}
1098 		}
1099 
1100 		/*
1101 		 * Check may be lockless but that's ok as we recheck later.
1102 		 * It's possible to migrate LRU and non-lru movable pages.
1103 		 * Skip any other type of page
1104 		 */
1105 		if (!PageLRU(page)) {
1106 			/*
1107 			 * __PageMovable can return false positive so we need
1108 			 * to verify it under page_lock.
1109 			 */
1110 			if (unlikely(__PageMovable(page)) &&
1111 					!PageIsolated(page)) {
1112 				if (locked) {
1113 					unlock_page_lruvec_irqrestore(locked, flags);
1114 					locked = NULL;
1115 				}
1116 
1117 				if (isolate_movable_page(page, mode)) {
1118 					folio = page_folio(page);
1119 					goto isolate_success;
1120 				}
1121 			}
1122 
1123 			goto isolate_fail;
1124 		}
1125 
1126 		/*
1127 		 * Be careful not to clear PageLRU until after we're
1128 		 * sure the page is not being freed elsewhere -- the
1129 		 * page release code relies on it.
1130 		 */
1131 		folio = folio_get_nontail_page(page);
1132 		if (unlikely(!folio))
1133 			goto isolate_fail;
1134 
1135 		/*
1136 		 * Migration will fail if an anonymous page is pinned in memory,
1137 		 * so avoid taking lru_lock and isolating it unnecessarily in an
1138 		 * admittedly racy check.
1139 		 */
1140 		mapping = folio_mapping(folio);
1141 		if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
1142 			goto isolate_fail_put;
1143 
1144 		/*
1145 		 * Only allow to migrate anonymous pages in GFP_NOFS context
1146 		 * because those do not depend on fs locks.
1147 		 */
1148 		if (!(cc->gfp_mask & __GFP_FS) && mapping)
1149 			goto isolate_fail_put;
1150 
1151 		/* Only take pages on LRU: a check now makes later tests safe */
1152 		if (!folio_test_lru(folio))
1153 			goto isolate_fail_put;
1154 
1155 		is_unevictable = folio_test_unevictable(folio);
1156 
1157 		/* Compaction might skip unevictable pages but CMA takes them */
1158 		if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable)
1159 			goto isolate_fail_put;
1160 
1161 		/*
1162 		 * To minimise LRU disruption, the caller can indicate with
1163 		 * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages
1164 		 * it will be able to migrate without blocking - clean pages
1165 		 * for the most part.  PageWriteback would require blocking.
1166 		 */
1167 		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
1168 			goto isolate_fail_put;
1169 
1170 		is_dirty = folio_test_dirty(folio);
1171 
1172 		if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) ||
1173 		    (mapping && is_unevictable)) {
1174 			bool migrate_dirty = true;
1175 			bool is_unmovable;
1176 
1177 			/*
1178 			 * Only folios without mappings or that have
1179 			 * a ->migrate_folio callback are possible to migrate
1180 			 * without blocking.
1181 			 *
1182 			 * Folios from unmovable mappings are not migratable.
1183 			 *
1184 			 * However, we can be racing with truncation, which can
1185 			 * free the mapping that we need to check. Truncation
1186 			 * holds the folio lock until after the folio is removed
1187 			 * from the page so holding it ourselves is sufficient.
1188 			 *
1189 			 * To avoid locking the folio just to check unmovable,
1190 			 * assume every unmovable folio is also unevictable,
1191 			 * which is a cheaper test.  If our assumption goes
1192 			 * wrong, it's not a correctness bug, just potentially
1193 			 * wasted cycles.
1194 			 */
1195 			if (!folio_trylock(folio))
1196 				goto isolate_fail_put;
1197 
1198 			mapping = folio_mapping(folio);
1199 			if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) {
1200 				migrate_dirty = !mapping ||
1201 						mapping->a_ops->migrate_folio;
1202 			}
1203 			is_unmovable = mapping && mapping_unmovable(mapping);
1204 			folio_unlock(folio);
1205 			if (!migrate_dirty || is_unmovable)
1206 				goto isolate_fail_put;
1207 		}
1208 
1209 		/* Try isolate the folio */
1210 		if (!folio_test_clear_lru(folio))
1211 			goto isolate_fail_put;
1212 
1213 		lruvec = folio_lruvec(folio);
1214 
1215 		/* If we already hold the lock, we can skip some rechecking */
1216 		if (lruvec != locked) {
1217 			if (locked)
1218 				unlock_page_lruvec_irqrestore(locked, flags);
1219 
1220 			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
1221 			locked = lruvec;
1222 
1223 			lruvec_memcg_debug(lruvec, folio);
1224 
1225 			/*
1226 			 * Try get exclusive access under lock. If marked for
1227 			 * skip, the scan is aborted unless the current context
1228 			 * is a rescan to reach the end of the pageblock.
1229 			 */
1230 			if (!skip_updated && valid_page) {
1231 				skip_updated = true;
1232 				if (test_and_set_skip(cc, valid_page) &&
1233 				    !cc->finish_pageblock) {
1234 					low_pfn = end_pfn;
1235 					goto isolate_abort;
1236 				}
1237 			}
1238 
1239 			/*
1240 			 * Check LRU folio order under the lock
1241 			 */
1242 			if (unlikely(skip_isolation_on_order(folio_order(folio),
1243 							     cc->order) &&
1244 				     !cc->alloc_contig)) {
1245 				low_pfn += folio_nr_pages(folio) - 1;
1246 				nr_scanned += folio_nr_pages(folio) - 1;
1247 				folio_set_lru(folio);
1248 				goto isolate_fail_put;
1249 			}
1250 		}
1251 
1252 		/* The folio is taken off the LRU */
1253 		if (folio_test_large(folio))
1254 			low_pfn += folio_nr_pages(folio) - 1;
1255 
1256 		/* Successfully isolated */
1257 		lruvec_del_folio(lruvec, folio);
1258 		node_stat_mod_folio(folio,
1259 				NR_ISOLATED_ANON + folio_is_file_lru(folio),
1260 				folio_nr_pages(folio));
1261 
1262 isolate_success:
1263 		list_add(&folio->lru, &cc->migratepages);
1264 isolate_success_no_list:
1265 		cc->nr_migratepages += folio_nr_pages(folio);
1266 		nr_isolated += folio_nr_pages(folio);
1267 		nr_scanned += folio_nr_pages(folio) - 1;
1268 
1269 		/*
1270 		 * Avoid isolating too much unless this block is being
1271 		 * fully scanned (e.g. dirty/writeback pages, parallel allocation)
1272 		 * or a lock is contended. For contention, isolate quickly to
1273 		 * potentially remove one source of contention.
1274 		 */
1275 		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
1276 		    !cc->finish_pageblock && !cc->contended) {
1277 			++low_pfn;
1278 			break;
1279 		}
1280 
1281 		continue;
1282 
1283 isolate_fail_put:
1284 		/* Avoid potential deadlock in freeing page under lru_lock */
1285 		if (locked) {
1286 			unlock_page_lruvec_irqrestore(locked, flags);
1287 			locked = NULL;
1288 		}
1289 		folio_put(folio);
1290 
1291 isolate_fail:
1292 		if (!skip_on_failure && ret != -ENOMEM)
1293 			continue;
1294 
1295 		/*
1296 		 * We have isolated some pages, but then failed. Release them
1297 		 * instead of migrating, as we cannot form the cc->order buddy
1298 		 * page anyway.
1299 		 */
1300 		if (nr_isolated) {
1301 			if (locked) {
1302 				unlock_page_lruvec_irqrestore(locked, flags);
1303 				locked = NULL;
1304 			}
1305 			putback_movable_pages(&cc->migratepages);
1306 			cc->nr_migratepages = 0;
1307 			nr_isolated = 0;
1308 		}
1309 
1310 		if (low_pfn < next_skip_pfn) {
1311 			low_pfn = next_skip_pfn - 1;
1312 			/*
1313 			 * The check near the loop beginning would have updated
1314 			 * next_skip_pfn too, but this is a bit simpler.
1315 			 */
1316 			next_skip_pfn += 1UL << cc->order;
1317 		}
1318 
1319 		if (ret == -ENOMEM)
1320 			break;
1321 	}
1322 
1323 	/*
1324 	 * The PageBuddy() check could have potentially brought us outside
1325 	 * the range to be scanned.
1326 	 */
1327 	if (unlikely(low_pfn > end_pfn))
1328 		low_pfn = end_pfn;
1329 
1330 	folio = NULL;
1331 
1332 isolate_abort:
1333 	if (locked)
1334 		unlock_page_lruvec_irqrestore(locked, flags);
1335 	if (folio) {
1336 		folio_set_lru(folio);
1337 		folio_put(folio);
1338 	}
1339 
1340 	/*
1341 	 * Update the cached scanner pfn once the pageblock has been scanned.
1342 	 * Pages will either be migrated in which case there is no point
1343 	 * scanning in the near future or migration failed in which case the
1344 	 * failure reason may persist. The block is marked for skipping if
1345 	 * there were no pages isolated in the block or if the block is
1346 	 * rescanned twice in a row.
1347 	 */
1348 	if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) {
1349 		if (!cc->no_set_skip_hint && valid_page && !skip_updated)
1350 			set_pageblock_skip(valid_page);
1351 		update_cached_migrate(cc, low_pfn);
1352 	}
1353 
1354 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
1355 						nr_scanned, nr_isolated);
1356 
1357 fatal_pending:
1358 	cc->total_migrate_scanned += nr_scanned;
1359 	if (nr_isolated)
1360 		count_compact_events(COMPACTISOLATED, nr_isolated);
1361 
1362 	cc->migrate_pfn = low_pfn;
1363 
1364 	return ret;
1365 }
1366 
1367 /**
1368  * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1369  * @cc:        Compaction control structure.
1370  * @start_pfn: The first PFN to start isolating.
1371  * @end_pfn:   The one-past-last PFN.
1372  *
1373  * Returns -EAGAIN when contented, -EINTR in case of a signal pending, -ENOMEM
1374  * in case we could not allocate a page, or 0.
1375  */
1376 int
1377 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
1378 							unsigned long end_pfn)
1379 {
1380 	unsigned long pfn, block_start_pfn, block_end_pfn;
1381 	int ret = 0;
1382 
1383 	/* Scan block by block. First and last block may be incomplete */
1384 	pfn = start_pfn;
1385 	block_start_pfn = pageblock_start_pfn(pfn);
1386 	if (block_start_pfn < cc->zone->zone_start_pfn)
1387 		block_start_pfn = cc->zone->zone_start_pfn;
1388 	block_end_pfn = pageblock_end_pfn(pfn);
1389 
1390 	for (; pfn < end_pfn; pfn = block_end_pfn,
1391 				block_start_pfn = block_end_pfn,
1392 				block_end_pfn += pageblock_nr_pages) {
1393 
1394 		block_end_pfn = min(block_end_pfn, end_pfn);
1395 
1396 		if (!pageblock_pfn_to_page(block_start_pfn,
1397 					block_end_pfn, cc->zone))
1398 			continue;
1399 
1400 		ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
1401 						 ISOLATE_UNEVICTABLE);
1402 
1403 		if (ret)
1404 			break;
1405 
1406 		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
1407 			break;
1408 	}
1409 
1410 	return ret;
1411 }
1412 
1413 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
1414 #ifdef CONFIG_COMPACTION
1415 
1416 static bool suitable_migration_source(struct compact_control *cc,
1417 							struct page *page)
1418 {
1419 	int block_mt;
1420 
1421 	if (pageblock_skip_persistent(page))
1422 		return false;
1423 
1424 	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
1425 		return true;
1426 
1427 	block_mt = get_pageblock_migratetype(page);
1428 
1429 	if (cc->migratetype == MIGRATE_MOVABLE)
1430 		return is_migrate_movable(block_mt);
1431 	else
1432 		return block_mt == cc->migratetype;
1433 }
1434 
1435 /* Returns true if the page is within a block suitable for migration to */
1436 static bool suitable_migration_target(struct compact_control *cc,
1437 							struct page *page)
1438 {
1439 	/* If the page is a large free page, then disallow migration */
1440 	if (PageBuddy(page)) {
1441 		int order = cc->order > 0 ? cc->order : pageblock_order;
1442 
1443 		/*
1444 		 * We are checking page_order without zone->lock taken. But
1445 		 * the only small danger is that we skip a potentially suitable
1446 		 * pageblock, so it's not worth to check order for valid range.
1447 		 */
1448 		if (buddy_order_unsafe(page) >= order)
1449 			return false;
1450 	}
1451 
1452 	if (cc->ignore_block_suitable)
1453 		return true;
1454 
1455 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1456 	if (is_migrate_movable(get_pageblock_migratetype(page)))
1457 		return true;
1458 
1459 	/* Otherwise skip the block */
1460 	return false;
1461 }
1462 
1463 static inline unsigned int
1464 freelist_scan_limit(struct compact_control *cc)
1465 {
1466 	unsigned short shift = BITS_PER_LONG - 1;
1467 
1468 	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
1469 }
1470 
1471 /*
1472  * Test whether the free scanner has reached the same or lower pageblock than
1473  * the migration scanner, and compaction should thus terminate.
1474  */
1475 static inline bool compact_scanners_met(struct compact_control *cc)
1476 {
1477 	return (cc->free_pfn >> pageblock_order)
1478 		<= (cc->migrate_pfn >> pageblock_order);
1479 }
1480 
1481 /*
1482  * Used when scanning for a suitable migration target which scans freelists
1483  * in reverse. Reorders the list such as the unscanned pages are scanned
1484  * first on the next iteration of the free scanner
1485  */
1486 static void
1487 move_freelist_head(struct list_head *freelist, struct page *freepage)
1488 {
1489 	LIST_HEAD(sublist);
1490 
1491 	if (!list_is_first(&freepage->buddy_list, freelist)) {
1492 		list_cut_before(&sublist, freelist, &freepage->buddy_list);
1493 		list_splice_tail(&sublist, freelist);
1494 	}
1495 }
1496 
1497 /*
1498  * Similar to move_freelist_head except used by the migration scanner
1499  * when scanning forward. It's possible for these list operations to
1500  * move against each other if they search the free list exactly in
1501  * lockstep.
1502  */
1503 static void
1504 move_freelist_tail(struct list_head *freelist, struct page *freepage)
1505 {
1506 	LIST_HEAD(sublist);
1507 
1508 	if (!list_is_last(&freepage->buddy_list, freelist)) {
1509 		list_cut_position(&sublist, freelist, &freepage->buddy_list);
1510 		list_splice_tail(&sublist, freelist);
1511 	}
1512 }
1513 
1514 static void
1515 fast_isolate_around(struct compact_control *cc, unsigned long pfn)
1516 {
1517 	unsigned long start_pfn, end_pfn;
1518 	struct page *page;
1519 
1520 	/* Do not search around if there are enough pages already */
1521 	if (cc->nr_freepages >= cc->nr_migratepages)
1522 		return;
1523 
1524 	/* Minimise scanning during async compaction */
1525 	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
1526 		return;
1527 
1528 	/* Pageblock boundaries */
1529 	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
1530 	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
1531 
1532 	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
1533 	if (!page)
1534 		return;
1535 
1536 	isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false);
1537 
1538 	/* Skip this pageblock in the future as it's full or nearly full */
1539 	if (start_pfn == end_pfn && !cc->no_set_skip_hint)
1540 		set_pageblock_skip(page);
1541 }
1542 
1543 /* Search orders in round-robin fashion */
1544 static int next_search_order(struct compact_control *cc, int order)
1545 {
1546 	order--;
1547 	if (order < 0)
1548 		order = cc->order - 1;
1549 
1550 	/* Search wrapped around? */
1551 	if (order == cc->search_order) {
1552 		cc->search_order--;
1553 		if (cc->search_order < 0)
1554 			cc->search_order = cc->order - 1;
1555 		return -1;
1556 	}
1557 
1558 	return order;
1559 }
1560 
1561 static void fast_isolate_freepages(struct compact_control *cc)
1562 {
1563 	unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
1564 	unsigned int nr_scanned = 0, total_isolated = 0;
1565 	unsigned long low_pfn, min_pfn, highest = 0;
1566 	unsigned long nr_isolated = 0;
1567 	unsigned long distance;
1568 	struct page *page = NULL;
1569 	bool scan_start = false;
1570 	int order;
1571 
1572 	/* Full compaction passes in a negative order */
1573 	if (cc->order <= 0)
1574 		return;
1575 
1576 	/*
1577 	 * If starting the scan, use a deeper search and use the highest
1578 	 * PFN found if a suitable one is not found.
1579 	 */
1580 	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
1581 		limit = pageblock_nr_pages >> 1;
1582 		scan_start = true;
1583 	}
1584 
1585 	/*
1586 	 * Preferred point is in the top quarter of the scan space but take
1587 	 * a pfn from the top half if the search is problematic.
1588 	 */
1589 	distance = (cc->free_pfn - cc->migrate_pfn);
1590 	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
1591 	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
1592 
1593 	if (WARN_ON_ONCE(min_pfn > low_pfn))
1594 		low_pfn = min_pfn;
1595 
1596 	/*
1597 	 * Search starts from the last successful isolation order or the next
1598 	 * order to search after a previous failure
1599 	 */
1600 	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1601 
1602 	for (order = cc->search_order;
1603 	     !page && order >= 0;
1604 	     order = next_search_order(cc, order)) {
1605 		struct free_area *area = &cc->zone->free_area[order];
1606 		struct list_head *freelist;
1607 		struct page *freepage;
1608 		unsigned long flags;
1609 		unsigned int order_scanned = 0;
1610 		unsigned long high_pfn = 0;
1611 
1612 		if (!area->nr_free)
1613 			continue;
1614 
1615 		spin_lock_irqsave(&cc->zone->lock, flags);
1616 		freelist = &area->free_list[MIGRATE_MOVABLE];
1617 		list_for_each_entry_reverse(freepage, freelist, buddy_list) {
1618 			unsigned long pfn;
1619 
1620 			order_scanned++;
1621 			nr_scanned++;
1622 			pfn = page_to_pfn(freepage);
1623 
1624 			if (pfn >= highest)
1625 				highest = max(pageblock_start_pfn(pfn),
1626 					      cc->zone->zone_start_pfn);
1627 
1628 			if (pfn >= low_pfn) {
1629 				cc->fast_search_fail = 0;
1630 				cc->search_order = order;
1631 				page = freepage;
1632 				break;
1633 			}
1634 
1635 			if (pfn >= min_pfn && pfn > high_pfn) {
1636 				high_pfn = pfn;
1637 
1638 				/* Shorten the scan if a candidate is found */
1639 				limit >>= 1;
1640 			}
1641 
1642 			if (order_scanned >= limit)
1643 				break;
1644 		}
1645 
1646 		/* Use a maximum candidate pfn if a preferred one was not found */
1647 		if (!page && high_pfn) {
1648 			page = pfn_to_page(high_pfn);
1649 
1650 			/* Update freepage for the list reorder below */
1651 			freepage = page;
1652 		}
1653 
1654 		/* Reorder to so a future search skips recent pages */
1655 		move_freelist_head(freelist, freepage);
1656 
1657 		/* Isolate the page if available */
1658 		if (page) {
1659 			if (__isolate_free_page(page, order)) {
1660 				set_page_private(page, order);
1661 				nr_isolated = 1 << order;
1662 				nr_scanned += nr_isolated - 1;
1663 				total_isolated += nr_isolated;
1664 				cc->nr_freepages += nr_isolated;
1665 				list_add_tail(&page->lru, &cc->freepages[order]);
1666 				count_compact_events(COMPACTISOLATED, nr_isolated);
1667 			} else {
1668 				/* If isolation fails, abort the search */
1669 				order = cc->search_order + 1;
1670 				page = NULL;
1671 			}
1672 		}
1673 
1674 		spin_unlock_irqrestore(&cc->zone->lock, flags);
1675 
1676 		/* Skip fast search if enough freepages isolated */
1677 		if (cc->nr_freepages >= cc->nr_migratepages)
1678 			break;
1679 
1680 		/*
1681 		 * Smaller scan on next order so the total scan is related
1682 		 * to freelist_scan_limit.
1683 		 */
1684 		if (order_scanned >= limit)
1685 			limit = max(1U, limit >> 1);
1686 	}
1687 
1688 	trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn,
1689 						   nr_scanned, total_isolated);
1690 
1691 	if (!page) {
1692 		cc->fast_search_fail++;
1693 		if (scan_start) {
1694 			/*
1695 			 * Use the highest PFN found above min. If one was
1696 			 * not found, be pessimistic for direct compaction
1697 			 * and use the min mark.
1698 			 */
1699 			if (highest >= min_pfn) {
1700 				page = pfn_to_page(highest);
1701 				cc->free_pfn = highest;
1702 			} else {
1703 				if (cc->direct_compaction && pfn_valid(min_pfn)) {
1704 					page = pageblock_pfn_to_page(min_pfn,
1705 						min(pageblock_end_pfn(min_pfn),
1706 						    zone_end_pfn(cc->zone)),
1707 						cc->zone);
1708 					if (page && !suitable_migration_target(cc, page))
1709 						page = NULL;
1710 
1711 					cc->free_pfn = min_pfn;
1712 				}
1713 			}
1714 		}
1715 	}
1716 
1717 	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1718 		highest -= pageblock_nr_pages;
1719 		cc->zone->compact_cached_free_pfn = highest;
1720 	}
1721 
1722 	cc->total_free_scanned += nr_scanned;
1723 	if (!page)
1724 		return;
1725 
1726 	low_pfn = page_to_pfn(page);
1727 	fast_isolate_around(cc, low_pfn);
1728 }
1729 
1730 /*
1731  * Based on information in the current compact_control, find blocks
1732  * suitable for isolating free pages from and then isolate them.
1733  */
1734 static void isolate_freepages(struct compact_control *cc)
1735 {
1736 	struct zone *zone = cc->zone;
1737 	struct page *page;
1738 	unsigned long block_start_pfn;	/* start of current pageblock */
1739 	unsigned long isolate_start_pfn; /* exact pfn we start at */
1740 	unsigned long block_end_pfn;	/* end of current pageblock */
1741 	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
1742 	unsigned int stride;
1743 
1744 	/* Try a small search of the free lists for a candidate */
1745 	fast_isolate_freepages(cc);
1746 	if (cc->nr_freepages)
1747 		return;
1748 
1749 	/*
1750 	 * Initialise the free scanner. The starting point is where we last
1751 	 * successfully isolated from, zone-cached value, or the end of the
1752 	 * zone when isolating for the first time. For looping we also need
1753 	 * this pfn aligned down to the pageblock boundary, because we do
1754 	 * block_start_pfn -= pageblock_nr_pages in the for loop.
1755 	 * For ending point, take care when isolating in last pageblock of a
1756 	 * zone which ends in the middle of a pageblock.
1757 	 * The low boundary is the end of the pageblock the migration scanner
1758 	 * is using.
1759 	 */
1760 	isolate_start_pfn = cc->free_pfn;
1761 	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
1762 	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1763 						zone_end_pfn(zone));
1764 	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1765 	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
1766 
1767 	/*
1768 	 * Isolate free pages until enough are available to migrate the
1769 	 * pages on cc->migratepages. We stop searching if the migrate
1770 	 * and free page scanners meet or enough free pages are isolated.
1771 	 */
1772 	for (; block_start_pfn >= low_pfn;
1773 				block_end_pfn = block_start_pfn,
1774 				block_start_pfn -= pageblock_nr_pages,
1775 				isolate_start_pfn = block_start_pfn) {
1776 		unsigned long nr_isolated;
1777 
1778 		/*
1779 		 * This can iterate a massively long zone without finding any
1780 		 * suitable migration targets, so periodically check resched.
1781 		 */
1782 		if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
1783 			cond_resched();
1784 
1785 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1786 									zone);
1787 		if (!page) {
1788 			unsigned long next_pfn;
1789 
1790 			next_pfn = skip_offline_sections_reverse(block_start_pfn);
1791 			if (next_pfn)
1792 				block_start_pfn = max(next_pfn, low_pfn);
1793 
1794 			continue;
1795 		}
1796 
1797 		/* Check the block is suitable for migration */
1798 		if (!suitable_migration_target(cc, page))
1799 			continue;
1800 
1801 		/* If isolation recently failed, do not retry */
1802 		if (!isolation_suitable(cc, page))
1803 			continue;
1804 
1805 		/* Found a block suitable for isolating free pages from. */
1806 		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1807 					block_end_pfn, cc->freepages, stride, false);
1808 
1809 		/* Update the skip hint if the full pageblock was scanned */
1810 		if (isolate_start_pfn == block_end_pfn)
1811 			update_pageblock_skip(cc, page, block_start_pfn -
1812 					      pageblock_nr_pages);
1813 
1814 		/* Are enough freepages isolated? */
1815 		if (cc->nr_freepages >= cc->nr_migratepages) {
1816 			if (isolate_start_pfn >= block_end_pfn) {
1817 				/*
1818 				 * Restart at previous pageblock if more
1819 				 * freepages can be isolated next time.
1820 				 */
1821 				isolate_start_pfn =
1822 					block_start_pfn - pageblock_nr_pages;
1823 			}
1824 			break;
1825 		} else if (isolate_start_pfn < block_end_pfn) {
1826 			/*
1827 			 * If isolation failed early, do not continue
1828 			 * needlessly.
1829 			 */
1830 			break;
1831 		}
1832 
1833 		/* Adjust stride depending on isolation */
1834 		if (nr_isolated) {
1835 			stride = 1;
1836 			continue;
1837 		}
1838 		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
1839 	}
1840 
1841 	/*
1842 	 * Record where the free scanner will restart next time. Either we
1843 	 * broke from the loop and set isolate_start_pfn based on the last
1844 	 * call to isolate_freepages_block(), or we met the migration scanner
1845 	 * and the loop terminated due to isolate_start_pfn < low_pfn
1846 	 */
1847 	cc->free_pfn = isolate_start_pfn;
1848 }
1849 
1850 /*
1851  * This is a migrate-callback that "allocates" freepages by taking pages
1852  * from the isolated freelists in the block we are migrating to.
1853  */
1854 static struct folio *compaction_alloc(struct folio *src, unsigned long data)
1855 {
1856 	struct compact_control *cc = (struct compact_control *)data;
1857 	struct folio *dst;
1858 	int order = folio_order(src);
1859 	bool has_isolated_pages = false;
1860 	int start_order;
1861 	struct page *freepage;
1862 	unsigned long size;
1863 
1864 again:
1865 	for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++)
1866 		if (!list_empty(&cc->freepages[start_order]))
1867 			break;
1868 
1869 	/* no free pages in the list */
1870 	if (start_order == NR_PAGE_ORDERS) {
1871 		if (has_isolated_pages)
1872 			return NULL;
1873 		isolate_freepages(cc);
1874 		has_isolated_pages = true;
1875 		goto again;
1876 	}
1877 
1878 	freepage = list_first_entry(&cc->freepages[start_order], struct page,
1879 				lru);
1880 	size = 1 << start_order;
1881 
1882 	list_del(&freepage->lru);
1883 
1884 	while (start_order > order) {
1885 		start_order--;
1886 		size >>= 1;
1887 
1888 		list_add(&freepage[size].lru, &cc->freepages[start_order]);
1889 		set_page_private(&freepage[size], start_order);
1890 	}
1891 	dst = (struct folio *)freepage;
1892 
1893 	post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
1894 	if (order)
1895 		prep_compound_page(&dst->page, order);
1896 	cc->nr_freepages -= 1 << order;
1897 	cc->nr_migratepages -= 1 << order;
1898 	return page_rmappable_folio(&dst->page);
1899 }
1900 
1901 /*
1902  * This is a migrate-callback that "frees" freepages back to the isolated
1903  * freelist.  All pages on the freelist are from the same zone, so there is no
1904  * special handling needed for NUMA.
1905  */
1906 static void compaction_free(struct folio *dst, unsigned long data)
1907 {
1908 	struct compact_control *cc = (struct compact_control *)data;
1909 	int order = folio_order(dst);
1910 	struct page *page = &dst->page;
1911 
1912 	if (folio_put_testzero(dst)) {
1913 		free_pages_prepare(page, order);
1914 		list_add(&dst->lru, &cc->freepages[order]);
1915 		cc->nr_freepages += 1 << order;
1916 	}
1917 	cc->nr_migratepages += 1 << order;
1918 	/*
1919 	 * someone else has referenced the page, we cannot take it back to our
1920 	 * free list.
1921 	 */
1922 }
1923 
1924 /* possible outcome of isolate_migratepages */
1925 typedef enum {
1926 	ISOLATE_ABORT,		/* Abort compaction now */
1927 	ISOLATE_NONE,		/* No pages isolated, continue scanning */
1928 	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
1929 } isolate_migrate_t;
1930 
1931 /*
1932  * Allow userspace to control policy on scanning the unevictable LRU for
1933  * compactable pages.
1934  */
1935 static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT;
1936 /*
1937  * Tunable for proactive compaction. It determines how
1938  * aggressively the kernel should compact memory in the
1939  * background. It takes values in the range [0, 100].
1940  */
1941 static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
1942 static int sysctl_extfrag_threshold = 500;
1943 static int __read_mostly sysctl_compact_memory;
1944 
1945 static inline void
1946 update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
1947 {
1948 	if (cc->fast_start_pfn == ULONG_MAX)
1949 		return;
1950 
1951 	if (!cc->fast_start_pfn)
1952 		cc->fast_start_pfn = pfn;
1953 
1954 	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
1955 }
1956 
1957 static inline unsigned long
1958 reinit_migrate_pfn(struct compact_control *cc)
1959 {
1960 	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
1961 		return cc->migrate_pfn;
1962 
1963 	cc->migrate_pfn = cc->fast_start_pfn;
1964 	cc->fast_start_pfn = ULONG_MAX;
1965 
1966 	return cc->migrate_pfn;
1967 }
1968 
1969 /*
1970  * Briefly search the free lists for a migration source that already has
1971  * some free pages to reduce the number of pages that need migration
1972  * before a pageblock is free.
1973  */
1974 static unsigned long fast_find_migrateblock(struct compact_control *cc)
1975 {
1976 	unsigned int limit = freelist_scan_limit(cc);
1977 	unsigned int nr_scanned = 0;
1978 	unsigned long distance;
1979 	unsigned long pfn = cc->migrate_pfn;
1980 	unsigned long high_pfn;
1981 	int order;
1982 	bool found_block = false;
1983 
1984 	/* Skip hints are relied on to avoid repeats on the fast search */
1985 	if (cc->ignore_skip_hint)
1986 		return pfn;
1987 
1988 	/*
1989 	 * If the pageblock should be finished then do not select a different
1990 	 * pageblock.
1991 	 */
1992 	if (cc->finish_pageblock)
1993 		return pfn;
1994 
1995 	/*
1996 	 * If the migrate_pfn is not at the start of a zone or the start
1997 	 * of a pageblock then assume this is a continuation of a previous
1998 	 * scan restarted due to COMPACT_CLUSTER_MAX.
1999 	 */
2000 	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
2001 		return pfn;
2002 
2003 	/*
2004 	 * For smaller orders, just linearly scan as the number of pages
2005 	 * to migrate should be relatively small and does not necessarily
2006 	 * justify freeing up a large block for a small allocation.
2007 	 */
2008 	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
2009 		return pfn;
2010 
2011 	/*
2012 	 * Only allow kcompactd and direct requests for movable pages to
2013 	 * quickly clear out a MOVABLE pageblock for allocation. This
2014 	 * reduces the risk that a large movable pageblock is freed for
2015 	 * an unmovable/reclaimable small allocation.
2016 	 */
2017 	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
2018 		return pfn;
2019 
2020 	/*
2021 	 * When starting the migration scanner, pick any pageblock within the
2022 	 * first half of the search space. Otherwise try and pick a pageblock
2023 	 * within the first eighth to reduce the chances that a migration
2024 	 * target later becomes a source.
2025 	 */
2026 	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
2027 	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
2028 		distance >>= 2;
2029 	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
2030 
2031 	for (order = cc->order - 1;
2032 	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
2033 	     order--) {
2034 		struct free_area *area = &cc->zone->free_area[order];
2035 		struct list_head *freelist;
2036 		unsigned long flags;
2037 		struct page *freepage;
2038 
2039 		if (!area->nr_free)
2040 			continue;
2041 
2042 		spin_lock_irqsave(&cc->zone->lock, flags);
2043 		freelist = &area->free_list[MIGRATE_MOVABLE];
2044 		list_for_each_entry(freepage, freelist, buddy_list) {
2045 			unsigned long free_pfn;
2046 
2047 			if (nr_scanned++ >= limit) {
2048 				move_freelist_tail(freelist, freepage);
2049 				break;
2050 			}
2051 
2052 			free_pfn = page_to_pfn(freepage);
2053 			if (free_pfn < high_pfn) {
2054 				/*
2055 				 * Avoid if skipped recently. Ideally it would
2056 				 * move to the tail but even safe iteration of
2057 				 * the list assumes an entry is deleted, not
2058 				 * reordered.
2059 				 */
2060 				if (get_pageblock_skip(freepage))
2061 					continue;
2062 
2063 				/* Reorder to so a future search skips recent pages */
2064 				move_freelist_tail(freelist, freepage);
2065 
2066 				update_fast_start_pfn(cc, free_pfn);
2067 				pfn = pageblock_start_pfn(free_pfn);
2068 				if (pfn < cc->zone->zone_start_pfn)
2069 					pfn = cc->zone->zone_start_pfn;
2070 				cc->fast_search_fail = 0;
2071 				found_block = true;
2072 				break;
2073 			}
2074 		}
2075 		spin_unlock_irqrestore(&cc->zone->lock, flags);
2076 	}
2077 
2078 	cc->total_migrate_scanned += nr_scanned;
2079 
2080 	/*
2081 	 * If fast scanning failed then use a cached entry for a page block
2082 	 * that had free pages as the basis for starting a linear scan.
2083 	 */
2084 	if (!found_block) {
2085 		cc->fast_search_fail++;
2086 		pfn = reinit_migrate_pfn(cc);
2087 	}
2088 	return pfn;
2089 }
2090 
2091 /*
2092  * Isolate all pages that can be migrated from the first suitable block,
2093  * starting at the block pointed to by the migrate scanner pfn within
2094  * compact_control.
2095  */
2096 static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
2097 {
2098 	unsigned long block_start_pfn;
2099 	unsigned long block_end_pfn;
2100 	unsigned long low_pfn;
2101 	struct page *page;
2102 	const isolate_mode_t isolate_mode =
2103 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
2104 		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
2105 	bool fast_find_block;
2106 
2107 	/*
2108 	 * Start at where we last stopped, or beginning of the zone as
2109 	 * initialized by compact_zone(). The first failure will use
2110 	 * the lowest PFN as the starting point for linear scanning.
2111 	 */
2112 	low_pfn = fast_find_migrateblock(cc);
2113 	block_start_pfn = pageblock_start_pfn(low_pfn);
2114 	if (block_start_pfn < cc->zone->zone_start_pfn)
2115 		block_start_pfn = cc->zone->zone_start_pfn;
2116 
2117 	/*
2118 	 * fast_find_migrateblock() has already ensured the pageblock is not
2119 	 * set with a skipped flag, so to avoid the isolation_suitable check
2120 	 * below again, check whether the fast search was successful.
2121 	 */
2122 	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
2123 
2124 	/* Only scan within a pageblock boundary */
2125 	block_end_pfn = pageblock_end_pfn(low_pfn);
2126 
2127 	/*
2128 	 * Iterate over whole pageblocks until we find the first suitable.
2129 	 * Do not cross the free scanner.
2130 	 */
2131 	for (; block_end_pfn <= cc->free_pfn;
2132 			fast_find_block = false,
2133 			cc->migrate_pfn = low_pfn = block_end_pfn,
2134 			block_start_pfn = block_end_pfn,
2135 			block_end_pfn += pageblock_nr_pages) {
2136 
2137 		/*
2138 		 * This can potentially iterate a massively long zone with
2139 		 * many pageblocks unsuitable, so periodically check if we
2140 		 * need to schedule.
2141 		 */
2142 		if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
2143 			cond_resched();
2144 
2145 		page = pageblock_pfn_to_page(block_start_pfn,
2146 						block_end_pfn, cc->zone);
2147 		if (!page) {
2148 			unsigned long next_pfn;
2149 
2150 			next_pfn = skip_offline_sections(block_start_pfn);
2151 			if (next_pfn)
2152 				block_end_pfn = min(next_pfn, cc->free_pfn);
2153 			continue;
2154 		}
2155 
2156 		/*
2157 		 * If isolation recently failed, do not retry. Only check the
2158 		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
2159 		 * to be visited multiple times. Assume skip was checked
2160 		 * before making it "skip" so other compaction instances do
2161 		 * not scan the same block.
2162 		 */
2163 		if ((pageblock_aligned(low_pfn) ||
2164 		     low_pfn == cc->zone->zone_start_pfn) &&
2165 		    !fast_find_block && !isolation_suitable(cc, page))
2166 			continue;
2167 
2168 		/*
2169 		 * For async direct compaction, only scan the pageblocks of the
2170 		 * same migratetype without huge pages. Async direct compaction
2171 		 * is optimistic to see if the minimum amount of work satisfies
2172 		 * the allocation. The cached PFN is updated as it's possible
2173 		 * that all remaining blocks between source and target are
2174 		 * unsuitable and the compaction scanners fail to meet.
2175 		 */
2176 		if (!suitable_migration_source(cc, page)) {
2177 			update_cached_migrate(cc, block_end_pfn);
2178 			continue;
2179 		}
2180 
2181 		/* Perform the isolation */
2182 		if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
2183 						isolate_mode))
2184 			return ISOLATE_ABORT;
2185 
2186 		/*
2187 		 * Either we isolated something and proceed with migration. Or
2188 		 * we failed and compact_zone should decide if we should
2189 		 * continue or not.
2190 		 */
2191 		break;
2192 	}
2193 
2194 	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
2195 }
2196 
2197 /*
2198  * Determine whether kswapd is (or recently was!) running on this node.
2199  *
2200  * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
2201  * zero it.
2202  */
2203 static bool kswapd_is_running(pg_data_t *pgdat)
2204 {
2205 	bool running;
2206 
2207 	pgdat_kswapd_lock(pgdat);
2208 	running = pgdat->kswapd && task_is_running(pgdat->kswapd);
2209 	pgdat_kswapd_unlock(pgdat);
2210 
2211 	return running;
2212 }
2213 
2214 /*
2215  * A zone's fragmentation score is the external fragmentation wrt to the
2216  * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
2217  */
2218 static unsigned int fragmentation_score_zone(struct zone *zone)
2219 {
2220 	return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
2221 }
2222 
2223 /*
2224  * A weighted zone's fragmentation score is the external fragmentation
2225  * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
2226  * returns a value in the range [0, 100].
2227  *
2228  * The scaling factor ensures that proactive compaction focuses on larger
2229  * zones like ZONE_NORMAL, rather than smaller, specialized zones like
2230  * ZONE_DMA32. For smaller zones, the score value remains close to zero,
2231  * and thus never exceeds the high threshold for proactive compaction.
2232  */
2233 static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
2234 {
2235 	unsigned long score;
2236 
2237 	score = zone->present_pages * fragmentation_score_zone(zone);
2238 	return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
2239 }
2240 
2241 /*
2242  * The per-node proactive (background) compaction process is started by its
2243  * corresponding kcompactd thread when the node's fragmentation score
2244  * exceeds the high threshold. The compaction process remains active till
2245  * the node's score falls below the low threshold, or one of the back-off
2246  * conditions is met.
2247  */
2248 static unsigned int fragmentation_score_node(pg_data_t *pgdat)
2249 {
2250 	unsigned int score = 0;
2251 	int zoneid;
2252 
2253 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2254 		struct zone *zone;
2255 
2256 		zone = &pgdat->node_zones[zoneid];
2257 		if (!populated_zone(zone))
2258 			continue;
2259 		score += fragmentation_score_zone_weighted(zone);
2260 	}
2261 
2262 	return score;
2263 }
2264 
2265 static unsigned int fragmentation_score_wmark(bool low)
2266 {
2267 	unsigned int wmark_low;
2268 
2269 	/*
2270 	 * Cap the low watermark to avoid excessive compaction
2271 	 * activity in case a user sets the proactiveness tunable
2272 	 * close to 100 (maximum).
2273 	 */
2274 	wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
2275 	return low ? wmark_low : min(wmark_low + 10, 100U);
2276 }
2277 
2278 static bool should_proactive_compact_node(pg_data_t *pgdat)
2279 {
2280 	int wmark_high;
2281 
2282 	if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
2283 		return false;
2284 
2285 	wmark_high = fragmentation_score_wmark(false);
2286 	return fragmentation_score_node(pgdat) > wmark_high;
2287 }
2288 
2289 static enum compact_result __compact_finished(struct compact_control *cc)
2290 {
2291 	unsigned int order;
2292 	const int migratetype = cc->migratetype;
2293 	int ret;
2294 
2295 	/* Compaction run completes if the migrate and free scanner meet */
2296 	if (compact_scanners_met(cc)) {
2297 		/* Let the next compaction start anew. */
2298 		reset_cached_positions(cc->zone);
2299 
2300 		/*
2301 		 * Mark that the PG_migrate_skip information should be cleared
2302 		 * by kswapd when it goes to sleep. kcompactd does not set the
2303 		 * flag itself as the decision to be clear should be directly
2304 		 * based on an allocation request.
2305 		 */
2306 		if (cc->direct_compaction)
2307 			cc->zone->compact_blockskip_flush = true;
2308 
2309 		if (cc->whole_zone)
2310 			return COMPACT_COMPLETE;
2311 		else
2312 			return COMPACT_PARTIAL_SKIPPED;
2313 	}
2314 
2315 	if (cc->proactive_compaction) {
2316 		int score, wmark_low;
2317 		pg_data_t *pgdat;
2318 
2319 		pgdat = cc->zone->zone_pgdat;
2320 		if (kswapd_is_running(pgdat))
2321 			return COMPACT_PARTIAL_SKIPPED;
2322 
2323 		score = fragmentation_score_zone(cc->zone);
2324 		wmark_low = fragmentation_score_wmark(true);
2325 
2326 		if (score > wmark_low)
2327 			ret = COMPACT_CONTINUE;
2328 		else
2329 			ret = COMPACT_SUCCESS;
2330 
2331 		goto out;
2332 	}
2333 
2334 	if (is_via_compact_memory(cc->order))
2335 		return COMPACT_CONTINUE;
2336 
2337 	/*
2338 	 * Always finish scanning a pageblock to reduce the possibility of
2339 	 * fallbacks in the future. This is particularly important when
2340 	 * migration source is unmovable/reclaimable but it's not worth
2341 	 * special casing.
2342 	 */
2343 	if (!pageblock_aligned(cc->migrate_pfn))
2344 		return COMPACT_CONTINUE;
2345 
2346 	/* Direct compactor: Is a suitable page free? */
2347 	ret = COMPACT_NO_SUITABLE_PAGE;
2348 	for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
2349 		struct free_area *area = &cc->zone->free_area[order];
2350 		bool can_steal;
2351 
2352 		/* Job done if page is free of the right migratetype */
2353 		if (!free_area_empty(area, migratetype))
2354 			return COMPACT_SUCCESS;
2355 
2356 #ifdef CONFIG_CMA
2357 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
2358 		if (migratetype == MIGRATE_MOVABLE &&
2359 			!free_area_empty(area, MIGRATE_CMA))
2360 			return COMPACT_SUCCESS;
2361 #endif
2362 		/*
2363 		 * Job done if allocation would steal freepages from
2364 		 * other migratetype buddy lists.
2365 		 */
2366 		if (find_suitable_fallback(area, order, migratetype,
2367 						true, &can_steal) != -1)
2368 			/*
2369 			 * Movable pages are OK in any pageblock. If we are
2370 			 * stealing for a non-movable allocation, make sure
2371 			 * we finish compacting the current pageblock first
2372 			 * (which is assured by the above migrate_pfn align
2373 			 * check) so it is as free as possible and we won't
2374 			 * have to steal another one soon.
2375 			 */
2376 			return COMPACT_SUCCESS;
2377 	}
2378 
2379 out:
2380 	if (cc->contended || fatal_signal_pending(current))
2381 		ret = COMPACT_CONTENDED;
2382 
2383 	return ret;
2384 }
2385 
2386 static enum compact_result compact_finished(struct compact_control *cc)
2387 {
2388 	int ret;
2389 
2390 	ret = __compact_finished(cc);
2391 	trace_mm_compaction_finished(cc->zone, cc->order, ret);
2392 	if (ret == COMPACT_NO_SUITABLE_PAGE)
2393 		ret = COMPACT_CONTINUE;
2394 
2395 	return ret;
2396 }
2397 
2398 static bool __compaction_suitable(struct zone *zone, int order,
2399 				  int highest_zoneidx,
2400 				  unsigned long wmark_target)
2401 {
2402 	unsigned long watermark;
2403 	/*
2404 	 * Watermarks for order-0 must be met for compaction to be able to
2405 	 * isolate free pages for migration targets. This means that the
2406 	 * watermark and alloc_flags have to match, or be more pessimistic than
2407 	 * the check in __isolate_free_page(). We don't use the direct
2408 	 * compactor's alloc_flags, as they are not relevant for freepage
2409 	 * isolation. We however do use the direct compactor's highest_zoneidx
2410 	 * to skip over zones where lowmem reserves would prevent allocation
2411 	 * even if compaction succeeds.
2412 	 * For costly orders, we require low watermark instead of min for
2413 	 * compaction to proceed to increase its chances.
2414 	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
2415 	 * suitable migration targets
2416 	 */
2417 	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
2418 				low_wmark_pages(zone) : min_wmark_pages(zone);
2419 	watermark += compact_gap(order);
2420 	return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
2421 				   ALLOC_CMA, wmark_target);
2422 }
2423 
2424 /*
2425  * compaction_suitable: Is this suitable to run compaction on this zone now?
2426  */
2427 bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
2428 {
2429 	enum compact_result compact_result;
2430 	bool suitable;
2431 
2432 	suitable = __compaction_suitable(zone, order, highest_zoneidx,
2433 					 zone_page_state(zone, NR_FREE_PAGES));
2434 	/*
2435 	 * fragmentation index determines if allocation failures are due to
2436 	 * low memory or external fragmentation
2437 	 *
2438 	 * index of -1000 would imply allocations might succeed depending on
2439 	 * watermarks, but we already failed the high-order watermark check
2440 	 * index towards 0 implies failure is due to lack of memory
2441 	 * index towards 1000 implies failure is due to fragmentation
2442 	 *
2443 	 * Only compact if a failure would be due to fragmentation. Also
2444 	 * ignore fragindex for non-costly orders where the alternative to
2445 	 * a successful reclaim/compaction is OOM. Fragindex and the
2446 	 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
2447 	 * excessive compaction for costly orders, but it should not be at the
2448 	 * expense of system stability.
2449 	 */
2450 	if (suitable) {
2451 		compact_result = COMPACT_CONTINUE;
2452 		if (order > PAGE_ALLOC_COSTLY_ORDER) {
2453 			int fragindex = fragmentation_index(zone, order);
2454 
2455 			if (fragindex >= 0 &&
2456 			    fragindex <= sysctl_extfrag_threshold) {
2457 				suitable = false;
2458 				compact_result = COMPACT_NOT_SUITABLE_ZONE;
2459 			}
2460 		}
2461 	} else {
2462 		compact_result = COMPACT_SKIPPED;
2463 	}
2464 
2465 	trace_mm_compaction_suitable(zone, order, compact_result);
2466 
2467 	return suitable;
2468 }
2469 
2470 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2471 		int alloc_flags)
2472 {
2473 	struct zone *zone;
2474 	struct zoneref *z;
2475 
2476 	/*
2477 	 * Make sure at least one zone would pass __compaction_suitable if we continue
2478 	 * retrying the reclaim.
2479 	 */
2480 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2481 				ac->highest_zoneidx, ac->nodemask) {
2482 		unsigned long available;
2483 
2484 		/*
2485 		 * Do not consider all the reclaimable memory because we do not
2486 		 * want to trash just for a single high order allocation which
2487 		 * is even not guaranteed to appear even if __compaction_suitable
2488 		 * is happy about the watermark check.
2489 		 */
2490 		available = zone_reclaimable_pages(zone) / order;
2491 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2492 		if (__compaction_suitable(zone, order, ac->highest_zoneidx,
2493 					  available))
2494 			return true;
2495 	}
2496 
2497 	return false;
2498 }
2499 
2500 /*
2501  * Should we do compaction for target allocation order.
2502  * Return COMPACT_SUCCESS if allocation for target order can be already
2503  * satisfied
2504  * Return COMPACT_SKIPPED if compaction for target order is likely to fail
2505  * Return COMPACT_CONTINUE if compaction for target order should be ran
2506  */
2507 static enum compact_result
2508 compaction_suit_allocation_order(struct zone *zone, unsigned int order,
2509 				 int highest_zoneidx, unsigned int alloc_flags)
2510 {
2511 	unsigned long watermark;
2512 
2513 	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
2514 	if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
2515 			      alloc_flags))
2516 		return COMPACT_SUCCESS;
2517 
2518 	if (!compaction_suitable(zone, order, highest_zoneidx))
2519 		return COMPACT_SKIPPED;
2520 
2521 	return COMPACT_CONTINUE;
2522 }
2523 
2524 static enum compact_result
2525 compact_zone(struct compact_control *cc, struct capture_control *capc)
2526 {
2527 	enum compact_result ret;
2528 	unsigned long start_pfn = cc->zone->zone_start_pfn;
2529 	unsigned long end_pfn = zone_end_pfn(cc->zone);
2530 	unsigned long last_migrated_pfn;
2531 	const bool sync = cc->mode != MIGRATE_ASYNC;
2532 	bool update_cached;
2533 	unsigned int nr_succeeded = 0, nr_migratepages;
2534 	int order;
2535 
2536 	/*
2537 	 * These counters track activities during zone compaction.  Initialize
2538 	 * them before compacting a new zone.
2539 	 */
2540 	cc->total_migrate_scanned = 0;
2541 	cc->total_free_scanned = 0;
2542 	cc->nr_migratepages = 0;
2543 	cc->nr_freepages = 0;
2544 	for (order = 0; order < NR_PAGE_ORDERS; order++)
2545 		INIT_LIST_HEAD(&cc->freepages[order]);
2546 	INIT_LIST_HEAD(&cc->migratepages);
2547 
2548 	cc->migratetype = gfp_migratetype(cc->gfp_mask);
2549 
2550 	if (!is_via_compact_memory(cc->order)) {
2551 		ret = compaction_suit_allocation_order(cc->zone, cc->order,
2552 						       cc->highest_zoneidx,
2553 						       cc->alloc_flags);
2554 		if (ret != COMPACT_CONTINUE)
2555 			return ret;
2556 	}
2557 
2558 	/*
2559 	 * Clear pageblock skip if there were failures recently and compaction
2560 	 * is about to be retried after being deferred.
2561 	 */
2562 	if (compaction_restarting(cc->zone, cc->order))
2563 		__reset_isolation_suitable(cc->zone);
2564 
2565 	/*
2566 	 * Setup to move all movable pages to the end of the zone. Used cached
2567 	 * information on where the scanners should start (unless we explicitly
2568 	 * want to compact the whole zone), but check that it is initialised
2569 	 * by ensuring the values are within zone boundaries.
2570 	 */
2571 	cc->fast_start_pfn = 0;
2572 	if (cc->whole_zone) {
2573 		cc->migrate_pfn = start_pfn;
2574 		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2575 	} else {
2576 		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2577 		cc->free_pfn = cc->zone->compact_cached_free_pfn;
2578 		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2579 			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2580 			cc->zone->compact_cached_free_pfn = cc->free_pfn;
2581 		}
2582 		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2583 			cc->migrate_pfn = start_pfn;
2584 			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2585 			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2586 		}
2587 
2588 		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2589 			cc->whole_zone = true;
2590 	}
2591 
2592 	last_migrated_pfn = 0;
2593 
2594 	/*
2595 	 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
2596 	 * the basis that some migrations will fail in ASYNC mode. However,
2597 	 * if the cached PFNs match and pageblocks are skipped due to having
2598 	 * no isolation candidates, then the sync state does not matter.
2599 	 * Until a pageblock with isolation candidates is found, keep the
2600 	 * cached PFNs in sync to avoid revisiting the same blocks.
2601 	 */
2602 	update_cached = !sync &&
2603 		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2604 
2605 	trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync);
2606 
2607 	/* lru_add_drain_all could be expensive with involving other CPUs */
2608 	lru_add_drain();
2609 
2610 	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
2611 		int err;
2612 		unsigned long iteration_start_pfn = cc->migrate_pfn;
2613 
2614 		/*
2615 		 * Avoid multiple rescans of the same pageblock which can
2616 		 * happen if a page cannot be isolated (dirty/writeback in
2617 		 * async mode) or if the migrated pages are being allocated
2618 		 * before the pageblock is cleared.  The first rescan will
2619 		 * capture the entire pageblock for migration. If it fails,
2620 		 * it'll be marked skip and scanning will proceed as normal.
2621 		 */
2622 		cc->finish_pageblock = false;
2623 		if (pageblock_start_pfn(last_migrated_pfn) ==
2624 		    pageblock_start_pfn(iteration_start_pfn)) {
2625 			cc->finish_pageblock = true;
2626 		}
2627 
2628 rescan:
2629 		switch (isolate_migratepages(cc)) {
2630 		case ISOLATE_ABORT:
2631 			ret = COMPACT_CONTENDED;
2632 			putback_movable_pages(&cc->migratepages);
2633 			cc->nr_migratepages = 0;
2634 			goto out;
2635 		case ISOLATE_NONE:
2636 			if (update_cached) {
2637 				cc->zone->compact_cached_migrate_pfn[1] =
2638 					cc->zone->compact_cached_migrate_pfn[0];
2639 			}
2640 
2641 			/*
2642 			 * We haven't isolated and migrated anything, but
2643 			 * there might still be unflushed migrations from
2644 			 * previous cc->order aligned block.
2645 			 */
2646 			goto check_drain;
2647 		case ISOLATE_SUCCESS:
2648 			update_cached = false;
2649 			last_migrated_pfn = max(cc->zone->zone_start_pfn,
2650 				pageblock_start_pfn(cc->migrate_pfn - 1));
2651 		}
2652 
2653 		/*
2654 		 * Record the number of pages to migrate since the
2655 		 * compaction_alloc/free() will update cc->nr_migratepages
2656 		 * properly.
2657 		 */
2658 		nr_migratepages = cc->nr_migratepages;
2659 		err = migrate_pages(&cc->migratepages, compaction_alloc,
2660 				compaction_free, (unsigned long)cc, cc->mode,
2661 				MR_COMPACTION, &nr_succeeded);
2662 
2663 		trace_mm_compaction_migratepages(nr_migratepages, nr_succeeded);
2664 
2665 		/* All pages were either migrated or will be released */
2666 		cc->nr_migratepages = 0;
2667 		if (err) {
2668 			putback_movable_pages(&cc->migratepages);
2669 			/*
2670 			 * migrate_pages() may return -ENOMEM when scanners meet
2671 			 * and we want compact_finished() to detect it
2672 			 */
2673 			if (err == -ENOMEM && !compact_scanners_met(cc)) {
2674 				ret = COMPACT_CONTENDED;
2675 				goto out;
2676 			}
2677 			/*
2678 			 * If an ASYNC or SYNC_LIGHT fails to migrate a page
2679 			 * within the pageblock_order-aligned block and
2680 			 * fast_find_migrateblock may be used then scan the
2681 			 * remainder of the pageblock. This will mark the
2682 			 * pageblock "skip" to avoid rescanning in the near
2683 			 * future. This will isolate more pages than necessary
2684 			 * for the request but avoid loops due to
2685 			 * fast_find_migrateblock revisiting blocks that were
2686 			 * recently partially scanned.
2687 			 */
2688 			if (!pageblock_aligned(cc->migrate_pfn) &&
2689 			    !cc->ignore_skip_hint && !cc->finish_pageblock &&
2690 			    (cc->mode < MIGRATE_SYNC)) {
2691 				cc->finish_pageblock = true;
2692 
2693 				/*
2694 				 * Draining pcplists does not help THP if
2695 				 * any page failed to migrate. Even after
2696 				 * drain, the pageblock will not be free.
2697 				 */
2698 				if (cc->order == COMPACTION_HPAGE_ORDER)
2699 					last_migrated_pfn = 0;
2700 
2701 				goto rescan;
2702 			}
2703 		}
2704 
2705 		/* Stop if a page has been captured */
2706 		if (capc && capc->page) {
2707 			ret = COMPACT_SUCCESS;
2708 			break;
2709 		}
2710 
2711 check_drain:
2712 		/*
2713 		 * Has the migration scanner moved away from the previous
2714 		 * cc->order aligned block where we migrated from? If yes,
2715 		 * flush the pages that were freed, so that they can merge and
2716 		 * compact_finished() can detect immediately if allocation
2717 		 * would succeed.
2718 		 */
2719 		if (cc->order > 0 && last_migrated_pfn) {
2720 			unsigned long current_block_start =
2721 				block_start_pfn(cc->migrate_pfn, cc->order);
2722 
2723 			if (last_migrated_pfn < current_block_start) {
2724 				lru_add_drain_cpu_zone(cc->zone);
2725 				/* No more flushing until we migrate again */
2726 				last_migrated_pfn = 0;
2727 			}
2728 		}
2729 	}
2730 
2731 out:
2732 	/*
2733 	 * Release free pages and update where the free scanner should restart,
2734 	 * so we don't leave any returned pages behind in the next attempt.
2735 	 */
2736 	if (cc->nr_freepages > 0) {
2737 		unsigned long free_pfn = release_free_list(cc->freepages);
2738 
2739 		cc->nr_freepages = 0;
2740 		VM_BUG_ON(free_pfn == 0);
2741 		/* The cached pfn is always the first in a pageblock */
2742 		free_pfn = pageblock_start_pfn(free_pfn);
2743 		/*
2744 		 * Only go back, not forward. The cached pfn might have been
2745 		 * already reset to zone end in compact_finished()
2746 		 */
2747 		if (free_pfn > cc->zone->compact_cached_free_pfn)
2748 			cc->zone->compact_cached_free_pfn = free_pfn;
2749 	}
2750 
2751 	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
2752 	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
2753 
2754 	trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
2755 
2756 	VM_BUG_ON(!list_empty(&cc->migratepages));
2757 
2758 	return ret;
2759 }
2760 
2761 static enum compact_result compact_zone_order(struct zone *zone, int order,
2762 		gfp_t gfp_mask, enum compact_priority prio,
2763 		unsigned int alloc_flags, int highest_zoneidx,
2764 		struct page **capture)
2765 {
2766 	enum compact_result ret;
2767 	struct compact_control cc = {
2768 		.order = order,
2769 		.search_order = order,
2770 		.gfp_mask = gfp_mask,
2771 		.zone = zone,
2772 		.mode = (prio == COMPACT_PRIO_ASYNC) ?
2773 					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
2774 		.alloc_flags = alloc_flags,
2775 		.highest_zoneidx = highest_zoneidx,
2776 		.direct_compaction = true,
2777 		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
2778 		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
2779 		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
2780 	};
2781 	struct capture_control capc = {
2782 		.cc = &cc,
2783 		.page = NULL,
2784 	};
2785 
2786 	/*
2787 	 * Make sure the structs are really initialized before we expose the
2788 	 * capture control, in case we are interrupted and the interrupt handler
2789 	 * frees a page.
2790 	 */
2791 	barrier();
2792 	WRITE_ONCE(current->capture_control, &capc);
2793 
2794 	ret = compact_zone(&cc, &capc);
2795 
2796 	/*
2797 	 * Make sure we hide capture control first before we read the captured
2798 	 * page pointer, otherwise an interrupt could free and capture a page
2799 	 * and we would leak it.
2800 	 */
2801 	WRITE_ONCE(current->capture_control, NULL);
2802 	*capture = READ_ONCE(capc.page);
2803 	/*
2804 	 * Technically, it is also possible that compaction is skipped but
2805 	 * the page is still captured out of luck(IRQ came and freed the page).
2806 	 * Returning COMPACT_SUCCESS in such cases helps in properly accounting
2807 	 * the COMPACT[STALL|FAIL] when compaction is skipped.
2808 	 */
2809 	if (*capture)
2810 		ret = COMPACT_SUCCESS;
2811 
2812 	return ret;
2813 }
2814 
2815 /**
2816  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2817  * @gfp_mask: The GFP mask of the current allocation
2818  * @order: The order of the current allocation
2819  * @alloc_flags: The allocation flags of the current allocation
2820  * @ac: The context of current allocation
2821  * @prio: Determines how hard direct compaction should try to succeed
2822  * @capture: Pointer to free page created by compaction will be stored here
2823  *
2824  * This is the main entry point for direct page compaction.
2825  */
2826 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
2827 		unsigned int alloc_flags, const struct alloc_context *ac,
2828 		enum compact_priority prio, struct page **capture)
2829 {
2830 	struct zoneref *z;
2831 	struct zone *zone;
2832 	enum compact_result rc = COMPACT_SKIPPED;
2833 
2834 	if (!gfp_compaction_allowed(gfp_mask))
2835 		return COMPACT_SKIPPED;
2836 
2837 	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
2838 
2839 	/* Compact each zone in the list */
2840 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2841 					ac->highest_zoneidx, ac->nodemask) {
2842 		enum compact_result status;
2843 
2844 		if (prio > MIN_COMPACT_PRIORITY
2845 					&& compaction_deferred(zone, order)) {
2846 			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
2847 			continue;
2848 		}
2849 
2850 		status = compact_zone_order(zone, order, gfp_mask, prio,
2851 				alloc_flags, ac->highest_zoneidx, capture);
2852 		rc = max(status, rc);
2853 
2854 		/* The allocation should succeed, stop compacting */
2855 		if (status == COMPACT_SUCCESS) {
2856 			/*
2857 			 * We think the allocation will succeed in this zone,
2858 			 * but it is not certain, hence the false. The caller
2859 			 * will repeat this with true if allocation indeed
2860 			 * succeeds in this zone.
2861 			 */
2862 			compaction_defer_reset(zone, order, false);
2863 
2864 			break;
2865 		}
2866 
2867 		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
2868 					status == COMPACT_PARTIAL_SKIPPED))
2869 			/*
2870 			 * We think that allocation won't succeed in this zone
2871 			 * so we defer compaction there. If it ends up
2872 			 * succeeding after all, it will be reset.
2873 			 */
2874 			defer_compaction(zone, order);
2875 
2876 		/*
2877 		 * We might have stopped compacting due to need_resched() in
2878 		 * async compaction, or due to a fatal signal detected. In that
2879 		 * case do not try further zones
2880 		 */
2881 		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
2882 					|| fatal_signal_pending(current))
2883 			break;
2884 	}
2885 
2886 	return rc;
2887 }
2888 
2889 /*
2890  * compact_node() - compact all zones within a node
2891  * @pgdat: The node page data
2892  * @proactive: Whether the compaction is proactive
2893  *
2894  * For proactive compaction, compact till each zone's fragmentation score
2895  * reaches within proactive compaction thresholds (as determined by the
2896  * proactiveness tunable), it is possible that the function returns before
2897  * reaching score targets due to various back-off conditions, such as,
2898  * contention on per-node or per-zone locks.
2899  */
2900 static int compact_node(pg_data_t *pgdat, bool proactive)
2901 {
2902 	int zoneid;
2903 	struct zone *zone;
2904 	struct compact_control cc = {
2905 		.order = -1,
2906 		.mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
2907 		.ignore_skip_hint = true,
2908 		.whole_zone = true,
2909 		.gfp_mask = GFP_KERNEL,
2910 		.proactive_compaction = proactive,
2911 	};
2912 
2913 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2914 		zone = &pgdat->node_zones[zoneid];
2915 		if (!populated_zone(zone))
2916 			continue;
2917 
2918 		if (fatal_signal_pending(current))
2919 			return -EINTR;
2920 
2921 		cc.zone = zone;
2922 
2923 		compact_zone(&cc, NULL);
2924 
2925 		if (proactive) {
2926 			count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
2927 					     cc.total_migrate_scanned);
2928 			count_compact_events(KCOMPACTD_FREE_SCANNED,
2929 					     cc.total_free_scanned);
2930 		}
2931 	}
2932 
2933 	return 0;
2934 }
2935 
2936 /* Compact all zones of all nodes in the system */
2937 static int compact_nodes(void)
2938 {
2939 	int ret, nid;
2940 
2941 	/* Flush pending updates to the LRU lists */
2942 	lru_add_drain_all();
2943 
2944 	for_each_online_node(nid) {
2945 		ret = compact_node(NODE_DATA(nid), false);
2946 		if (ret)
2947 			return ret;
2948 	}
2949 
2950 	return 0;
2951 }
2952 
2953 static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
2954 		void *buffer, size_t *length, loff_t *ppos)
2955 {
2956 	int rc, nid;
2957 
2958 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
2959 	if (rc)
2960 		return rc;
2961 
2962 	if (write && sysctl_compaction_proactiveness) {
2963 		for_each_online_node(nid) {
2964 			pg_data_t *pgdat = NODE_DATA(nid);
2965 
2966 			if (pgdat->proactive_compact_trigger)
2967 				continue;
2968 
2969 			pgdat->proactive_compact_trigger = true;
2970 			trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1,
2971 							     pgdat->nr_zones - 1);
2972 			wake_up_interruptible(&pgdat->kcompactd_wait);
2973 		}
2974 	}
2975 
2976 	return 0;
2977 }
2978 
2979 /*
2980  * This is the entry point for compacting all nodes via
2981  * /proc/sys/vm/compact_memory
2982  */
2983 static int sysctl_compaction_handler(struct ctl_table *table, int write,
2984 			void *buffer, size_t *length, loff_t *ppos)
2985 {
2986 	int ret;
2987 
2988 	ret = proc_dointvec(table, write, buffer, length, ppos);
2989 	if (ret)
2990 		return ret;
2991 
2992 	if (sysctl_compact_memory != 1)
2993 		return -EINVAL;
2994 
2995 	if (write)
2996 		ret = compact_nodes();
2997 
2998 	return ret;
2999 }
3000 
3001 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
3002 static ssize_t compact_store(struct device *dev,
3003 			     struct device_attribute *attr,
3004 			     const char *buf, size_t count)
3005 {
3006 	int nid = dev->id;
3007 
3008 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
3009 		/* Flush pending updates to the LRU lists */
3010 		lru_add_drain_all();
3011 
3012 		compact_node(NODE_DATA(nid), false);
3013 	}
3014 
3015 	return count;
3016 }
3017 static DEVICE_ATTR_WO(compact);
3018 
3019 int compaction_register_node(struct node *node)
3020 {
3021 	return device_create_file(&node->dev, &dev_attr_compact);
3022 }
3023 
3024 void compaction_unregister_node(struct node *node)
3025 {
3026 	device_remove_file(&node->dev, &dev_attr_compact);
3027 }
3028 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
3029 
3030 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
3031 {
3032 	return pgdat->kcompactd_max_order > 0 || kthread_should_stop() ||
3033 		pgdat->proactive_compact_trigger;
3034 }
3035 
3036 static bool kcompactd_node_suitable(pg_data_t *pgdat)
3037 {
3038 	int zoneid;
3039 	struct zone *zone;
3040 	enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
3041 	enum compact_result ret;
3042 
3043 	for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
3044 		zone = &pgdat->node_zones[zoneid];
3045 
3046 		if (!populated_zone(zone))
3047 			continue;
3048 
3049 		ret = compaction_suit_allocation_order(zone,
3050 				pgdat->kcompactd_max_order,
3051 				highest_zoneidx, ALLOC_WMARK_MIN);
3052 		if (ret == COMPACT_CONTINUE)
3053 			return true;
3054 	}
3055 
3056 	return false;
3057 }
3058 
3059 static void kcompactd_do_work(pg_data_t *pgdat)
3060 {
3061 	/*
3062 	 * With no special task, compact all zones so that a page of requested
3063 	 * order is allocatable.
3064 	 */
3065 	int zoneid;
3066 	struct zone *zone;
3067 	struct compact_control cc = {
3068 		.order = pgdat->kcompactd_max_order,
3069 		.search_order = pgdat->kcompactd_max_order,
3070 		.highest_zoneidx = pgdat->kcompactd_highest_zoneidx,
3071 		.mode = MIGRATE_SYNC_LIGHT,
3072 		.ignore_skip_hint = false,
3073 		.gfp_mask = GFP_KERNEL,
3074 	};
3075 	enum compact_result ret;
3076 
3077 	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
3078 							cc.highest_zoneidx);
3079 	count_compact_event(KCOMPACTD_WAKE);
3080 
3081 	for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
3082 		int status;
3083 
3084 		zone = &pgdat->node_zones[zoneid];
3085 		if (!populated_zone(zone))
3086 			continue;
3087 
3088 		if (compaction_deferred(zone, cc.order))
3089 			continue;
3090 
3091 		ret = compaction_suit_allocation_order(zone,
3092 				cc.order, zoneid, ALLOC_WMARK_MIN);
3093 		if (ret != COMPACT_CONTINUE)
3094 			continue;
3095 
3096 		if (kthread_should_stop())
3097 			return;
3098 
3099 		cc.zone = zone;
3100 		status = compact_zone(&cc, NULL);
3101 
3102 		if (status == COMPACT_SUCCESS) {
3103 			compaction_defer_reset(zone, cc.order, false);
3104 		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
3105 			/*
3106 			 * Buddy pages may become stranded on pcps that could
3107 			 * otherwise coalesce on the zone's free area for
3108 			 * order >= cc.order.  This is ratelimited by the
3109 			 * upcoming deferral.
3110 			 */
3111 			drain_all_pages(zone);
3112 
3113 			/*
3114 			 * We use sync migration mode here, so we defer like
3115 			 * sync direct compaction does.
3116 			 */
3117 			defer_compaction(zone, cc.order);
3118 		}
3119 
3120 		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
3121 				     cc.total_migrate_scanned);
3122 		count_compact_events(KCOMPACTD_FREE_SCANNED,
3123 				     cc.total_free_scanned);
3124 	}
3125 
3126 	/*
3127 	 * Regardless of success, we are done until woken up next. But remember
3128 	 * the requested order/highest_zoneidx in case it was higher/tighter
3129 	 * than our current ones
3130 	 */
3131 	if (pgdat->kcompactd_max_order <= cc.order)
3132 		pgdat->kcompactd_max_order = 0;
3133 	if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
3134 		pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
3135 }
3136 
3137 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
3138 {
3139 	if (!order)
3140 		return;
3141 
3142 	if (pgdat->kcompactd_max_order < order)
3143 		pgdat->kcompactd_max_order = order;
3144 
3145 	if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx)
3146 		pgdat->kcompactd_highest_zoneidx = highest_zoneidx;
3147 
3148 	/*
3149 	 * Pairs with implicit barrier in wait_event_freezable()
3150 	 * such that wakeups are not missed.
3151 	 */
3152 	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
3153 		return;
3154 
3155 	if (!kcompactd_node_suitable(pgdat))
3156 		return;
3157 
3158 	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
3159 							highest_zoneidx);
3160 	wake_up_interruptible(&pgdat->kcompactd_wait);
3161 }
3162 
3163 /*
3164  * The background compaction daemon, started as a kernel thread
3165  * from the init process.
3166  */
3167 static int kcompactd(void *p)
3168 {
3169 	pg_data_t *pgdat = (pg_data_t *)p;
3170 	struct task_struct *tsk = current;
3171 	long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
3172 	long timeout = default_timeout;
3173 
3174 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3175 
3176 	if (!cpumask_empty(cpumask))
3177 		set_cpus_allowed_ptr(tsk, cpumask);
3178 
3179 	set_freezable();
3180 
3181 	pgdat->kcompactd_max_order = 0;
3182 	pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
3183 
3184 	while (!kthread_should_stop()) {
3185 		unsigned long pflags;
3186 
3187 		/*
3188 		 * Avoid the unnecessary wakeup for proactive compaction
3189 		 * when it is disabled.
3190 		 */
3191 		if (!sysctl_compaction_proactiveness)
3192 			timeout = MAX_SCHEDULE_TIMEOUT;
3193 		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
3194 		if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
3195 			kcompactd_work_requested(pgdat), timeout) &&
3196 			!pgdat->proactive_compact_trigger) {
3197 
3198 			psi_memstall_enter(&pflags);
3199 			kcompactd_do_work(pgdat);
3200 			psi_memstall_leave(&pflags);
3201 			/*
3202 			 * Reset the timeout value. The defer timeout from
3203 			 * proactive compaction is lost here but that is fine
3204 			 * as the condition of the zone changing substantionally
3205 			 * then carrying on with the previous defer interval is
3206 			 * not useful.
3207 			 */
3208 			timeout = default_timeout;
3209 			continue;
3210 		}
3211 
3212 		/*
3213 		 * Start the proactive work with default timeout. Based
3214 		 * on the fragmentation score, this timeout is updated.
3215 		 */
3216 		timeout = default_timeout;
3217 		if (should_proactive_compact_node(pgdat)) {
3218 			unsigned int prev_score, score;
3219 
3220 			prev_score = fragmentation_score_node(pgdat);
3221 			compact_node(pgdat, true);
3222 			score = fragmentation_score_node(pgdat);
3223 			/*
3224 			 * Defer proactive compaction if the fragmentation
3225 			 * score did not go down i.e. no progress made.
3226 			 */
3227 			if (unlikely(score >= prev_score))
3228 				timeout =
3229 				   default_timeout << COMPACT_MAX_DEFER_SHIFT;
3230 		}
3231 		if (unlikely(pgdat->proactive_compact_trigger))
3232 			pgdat->proactive_compact_trigger = false;
3233 	}
3234 
3235 	return 0;
3236 }
3237 
3238 /*
3239  * This kcompactd start function will be called by init and node-hot-add.
3240  * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added.
3241  */
3242 void __meminit kcompactd_run(int nid)
3243 {
3244 	pg_data_t *pgdat = NODE_DATA(nid);
3245 
3246 	if (pgdat->kcompactd)
3247 		return;
3248 
3249 	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
3250 	if (IS_ERR(pgdat->kcompactd)) {
3251 		pr_err("Failed to start kcompactd on node %d\n", nid);
3252 		pgdat->kcompactd = NULL;
3253 	}
3254 }
3255 
3256 /*
3257  * Called by memory hotplug when all memory in a node is offlined. Caller must
3258  * be holding mem_hotplug_begin/done().
3259  */
3260 void __meminit kcompactd_stop(int nid)
3261 {
3262 	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
3263 
3264 	if (kcompactd) {
3265 		kthread_stop(kcompactd);
3266 		NODE_DATA(nid)->kcompactd = NULL;
3267 	}
3268 }
3269 
3270 /*
3271  * It's optimal to keep kcompactd on the same CPUs as their memory, but
3272  * not required for correctness. So if the last cpu in a node goes
3273  * away, we get changed to run anywhere: as the first one comes back,
3274  * restore their cpu bindings.
3275  */
3276 static int kcompactd_cpu_online(unsigned int cpu)
3277 {
3278 	int nid;
3279 
3280 	for_each_node_state(nid, N_MEMORY) {
3281 		pg_data_t *pgdat = NODE_DATA(nid);
3282 		const struct cpumask *mask;
3283 
3284 		mask = cpumask_of_node(pgdat->node_id);
3285 
3286 		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3287 			/* One of our CPUs online: restore mask */
3288 			if (pgdat->kcompactd)
3289 				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
3290 	}
3291 	return 0;
3292 }
3293 
3294 static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
3295 		int write, void *buffer, size_t *lenp, loff_t *ppos)
3296 {
3297 	int ret, old;
3298 
3299 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write)
3300 		return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3301 
3302 	old = *(int *)table->data;
3303 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3304 	if (ret)
3305 		return ret;
3306 	if (old != *(int *)table->data)
3307 		pr_warn_once("sysctl attribute %s changed by %s[%d]\n",
3308 			     table->procname, current->comm,
3309 			     task_pid_nr(current));
3310 	return ret;
3311 }
3312 
3313 static struct ctl_table vm_compaction[] = {
3314 	{
3315 		.procname	= "compact_memory",
3316 		.data		= &sysctl_compact_memory,
3317 		.maxlen		= sizeof(int),
3318 		.mode		= 0200,
3319 		.proc_handler	= sysctl_compaction_handler,
3320 	},
3321 	{
3322 		.procname	= "compaction_proactiveness",
3323 		.data		= &sysctl_compaction_proactiveness,
3324 		.maxlen		= sizeof(sysctl_compaction_proactiveness),
3325 		.mode		= 0644,
3326 		.proc_handler	= compaction_proactiveness_sysctl_handler,
3327 		.extra1		= SYSCTL_ZERO,
3328 		.extra2		= SYSCTL_ONE_HUNDRED,
3329 	},
3330 	{
3331 		.procname	= "extfrag_threshold",
3332 		.data		= &sysctl_extfrag_threshold,
3333 		.maxlen		= sizeof(int),
3334 		.mode		= 0644,
3335 		.proc_handler	= proc_dointvec_minmax,
3336 		.extra1		= SYSCTL_ZERO,
3337 		.extra2		= SYSCTL_ONE_THOUSAND,
3338 	},
3339 	{
3340 		.procname	= "compact_unevictable_allowed",
3341 		.data		= &sysctl_compact_unevictable_allowed,
3342 		.maxlen		= sizeof(int),
3343 		.mode		= 0644,
3344 		.proc_handler	= proc_dointvec_minmax_warn_RT_change,
3345 		.extra1		= SYSCTL_ZERO,
3346 		.extra2		= SYSCTL_ONE,
3347 	},
3348 	{ }
3349 };
3350 
3351 static int __init kcompactd_init(void)
3352 {
3353 	int nid;
3354 	int ret;
3355 
3356 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
3357 					"mm/compaction:online",
3358 					kcompactd_cpu_online, NULL);
3359 	if (ret < 0) {
3360 		pr_err("kcompactd: failed to register hotplug callbacks.\n");
3361 		return ret;
3362 	}
3363 
3364 	for_each_node_state(nid, N_MEMORY)
3365 		kcompactd_run(nid);
3366 	register_sysctl_init("vm", vm_compaction);
3367 	return 0;
3368 }
3369 subsys_initcall(kcompactd_init)
3370 
3371 #endif /* CONFIG_COMPACTION */
3372