// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
#define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)

static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))

/*
 * Page order with respect to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif
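
/*
 * Example of the definitions above (assuming a common configuration with
 * 4 KiB base pages and 2 MiB PMD-sized huge pages): PMD_SHIFT - PAGE_SHIFT
 * is 21 - 12 = 9, so COMPACTION_HPAGE_ORDER resolves to order 9, i.e. 512
 * base pages. Likewise, with a pageblock order of 9, block_start_pfn(1000, 9)
 * rounds down to pfn 512 and block_end_pfn(1000, 9) rounds up to pfn 1024.
 */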

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page)
{
	const struct movable_operations *mops;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return false;

	mops = page_movable_ops(page);
	if (mops)
		return true;

	return false;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, const struct movable_operations *mops)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * This page still has the type of a movable page, but it's
	 * actually not movable any more.
	 */
	page->mapping = (void *)PAGE_MAPPING_MOVABLE;
}
EXPORT_SYMBOL(__ClearPageMovable);
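
/*
 * Note on the layout used above: the movable_operations pointer is stored
 * directly in page->mapping with the PAGE_MAPPING_MOVABLE type bit set,
 * which is why __SetPageMovable() insists that the pointer itself has that
 * bit clear. __ClearPageMovable() keeps only the type bit, so the page is
 * still recognised as having been movable while no longer exposing any
 * callbacks.
 */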

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. The next 1 << compact_defer_shift compaction
 * attempts are skipped, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
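
/*
 * Worked example (assuming a zone that keeps failing compaction at the
 * requested order): each failure bumps compact_defer_shift, so
 * compaction_deferred() lets an attempt through only on the 2nd call,
 * then the 4th, the 8th, and so on, doubling until the cap of
 * 1 << COMPACT_MAX_DEFER_SHIFT == 64 calls between attempts. Once
 * compact_defer_shift sits at the cap and compact_considered has caught
 * up with it, compaction_restarting() reports true, signalling that the
 * zone has been failing for a long time.
 */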

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared, do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (check_source && PageLRU(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		if (check_target && PageBuddy(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. Once a scanner's
	 * starting point is found, both PageBuddy and PageLRU are checked
	 * as the pageblock is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

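/*
 * Illustration of the walk above: the migrate-side probe starts at
 * zone_start_pfn and advances one pageblock per iteration while the
 * free-side probe starts at the last pfn of the zone and retreats, so
 * the first suitable block seen from each end becomes the restart point
 * for the corresponding scanner. If the walk ends without a source
 * restart point below a free restart point, the "leave no distance"
 * fixup parks the cached positions at the meeting point instead.
 */
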
/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for readers or writers. Returns true if it was
 * already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do not update if the skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!pageblock_aligned(pfn))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true, which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to a pending fatal signal.
 * Returns false when compaction can continue.
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}

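/*
 * Typical usage of the pair above (sketch): isolate_freepages_block()
 * below takes cc->zone->lock with compact_lock_irqsave() the first time
 * it needs it inside a pageblock, and on every COMPACT_CLUSTER_MAX-th
 * pfn calls compact_unlock_should_abort() so that IRQs are re-enabled
 * periodically and a pending fatal signal aborts the scan.
 */
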
/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give IRQs a chance. Abort if a fatal signal
		 * is pending.
		 */
		if (!(blockpfn % COMPACT_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking. */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		nr_scanned += isolated - 1;
		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

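/*
 * Usage note for the helper above: @stride controls how densely the free
 * scanner probes a pageblock, trading completeness for speed, while strict
 * mode (used by the CMA-style range isolation below) forces a stride of 1
 * so that every pfn is examined and any non-free page fails the whole
 * block.
 */
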
/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass block_end_pfn if an isolated free page
		 * is larger than a pageblock. In this case, adjust the
		 * scanning range to the right block.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (i.e. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
	bool too_many;

	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	too_many = isolated > (inactive + active) / 2;
	if (!too_many)
		wake_throttle_isolated(pgdat);

	return too_many;
}

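/*
 * Example of the threshold above (illustrative numbers only): on a node
 * with 800 pages on the active lists and 600 on the inactive lists,
 * isolation is considered excessive once more than (800 + 600) / 2 = 700
 * pages sit on the NR_ISOLATED_* counters; below that, throttled waiters
 * are woken via wake_throttle_isolated().
 */
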
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within the same pageblock
 * @mode:	Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns errno, like -EAGAIN or -EINTR in case of e.g. a pending signal or
 * congestion, -ENOMEM in case we could not allocate a page, or 0.
 * cc->migrate_pfn will contain the next pfn to scan.
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly.
 */
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;
	struct page *page = NULL, *valid_page = NULL;
	struct address_space *mapping;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;
	int ret = 0;

	cc->migrate_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated.
	 */
	while (unlikely(too_many_isolated(pgdat))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return -EAGAIN;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return -EAGAIN;

		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);

		if (fatal_signal_pending(current))
			return -EINTR;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give IRQs a chance. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			if (fatal_signal_pending(current)) {
				cc->contended = true;
				ret = -EINTR;

				goto fatal_pending;
			}

			cond_resched();
		}

		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && pageblock_aligned(low_pfn)) {
			if (!isolation_suitable(cc, page)) {
				low_pfn = end_pfn;
				page = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		if (PageHuge(page) && cc->alloc_contig) {
			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);

			/*
			 * Fail isolation in case isolate_or_dissolve_huge_page()
			 * reports an error. In case of -ENOMEM, abort right away.
			 */
			if (ret < 0) {
				/* Do not report -EBUSY down the chain */
				if (ret == -EBUSY)
					ret = 0;
				low_pfn += compound_nr(page) - 1;
				goto isolate_fail;
			}

			if (PageHuge(page)) {
				/*
				 * Hugepage was successfully isolated and placed
				 * on the cc->migratepages list.
				 */
				low_pfn += compound_nr(page) - 1;
				goto isolate_success_no_list;
			}

			/*
			 * Ok, the hugepage was dissolved. Now these pages are
			 * Buddy and cannot be re-allocated because they are
			 * isolated. Fall-through as the check below handles
			 * Buddy pages.
			 */
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (e.g. CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-LRU movable pages.
		 * Skip any other type of page.
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}

				if (!isolate_movable_page(page, mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		mapping = page_mapping(page);
		if (!mapping && page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow anonymous pages to be migrated in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && mapping)
			goto isolate_fail;

		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		if (unlikely(!get_page_unless_zero(page)))
			goto isolate_fail;

		/* Only take pages on LRU: a check now makes later tests safe */
		if (!PageLRU(page))
			goto isolate_fail_put;

		/* Compaction might skip unevictable pages but CMA takes them */
		if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page))
			goto isolate_fail_put;

		/*
		 * To minimise LRU disruption, the caller can indicate with
		 * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages
		 * it will be able to migrate without blocking - clean pages
		 * for the most part. PageWriteback would require blocking.
		 */
		if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page))
			goto isolate_fail_put;

		if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) {
			bool migrate_dirty;

			/*
			 * Only pages without mappings or that have a
			 * ->migrate_folio callback are possible to migrate
			 * without blocking. However, we can be racing with
			 * truncation so it's necessary to lock the page
			 * to stabilise the mapping as truncation holds
			 * the page lock until after the page is removed
			 * from the page cache.
			 */
			if (!trylock_page(page))
				goto isolate_fail_put;

			mapping = page_mapping(page);
			migrate_dirty = !mapping ||
					mapping->a_ops->migrate_folio;
			unlock_page(page);
			if (!migrate_dirty)
				goto isolate_fail_put;
		}

		/* Try to isolate the page */
		if (!TestClearPageLRU(page))
			goto isolate_fail_put;

		lruvec = folio_lruvec(page_folio(page));

		/* If we already hold the lock, we can skip some rechecking */
		if (lruvec != locked) {
			if (locked)
				unlock_page_lruvec_irqrestore(locked, flags);

			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
			locked = lruvec;

			lruvec_memcg_debug(lruvec, page_folio(page));

			/* Try to get exclusive access under lock */
			if (!skip_updated) {
				skip_updated = true;
				if (test_and_set_skip(cc, page, low_pfn))
					goto isolate_abort;
			}

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
				low_pfn += compound_nr(page) - 1;
				SetPageLRU(page);
				goto isolate_fail_put;
			}
		}

		/* The whole page is taken off the LRU; skip the tail pages. */
		if (PageCompound(page))
			low_pfn += compound_nr(page) - 1;

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec);
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				thp_nr_pages(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
isolate_success_no_list:
		cc->nr_migratepages += compound_nr(page);
		nr_isolated += compound_nr(page);
		nr_scanned += compound_nr(page) - 1;

		/*
		 * Avoid isolating too much unless this block is being
		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->rescan && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;

isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			unlock_page_lruvec_irqrestore(locked, flags);
			locked = NULL;
		}
		put_page(page);

isolate_fail:
		if (!skip_on_failure && ret != -ENOMEM)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}

		if (ret == -ENOMEM)
			break;
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	page = NULL;

isolate_abort:
	if (locked)
		unlock_page_lruvec_irqrestore(locked, flags);
	if (page) {
		SetPageLRU(page);
		put_page(page);
	}

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated, in which case there is no point
	 * scanning in the near future, or migration failed, in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
118150b5b094SVlastimil Babka */ 1182804d3121SMel Gorman if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) { 1183e380bebeSMel Gorman if (valid_page && !skip_updated) 1184e380bebeSMel Gorman set_pageblock_skip(valid_page); 1185e380bebeSMel Gorman update_cached_migrate(cc, low_pfn); 1186e380bebeSMel Gorman } 1187bb13ffebSMel Gorman 1188e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, 1189e34d85f0SJoonsoo Kim nr_scanned, nr_isolated); 1190b7aba698SMel Gorman 1191670105a2SMel Gorman fatal_pending: 11927f354a54SDavid Rientjes cc->total_migrate_scanned += nr_scanned; 1193397487dbSMel Gorman if (nr_isolated) 1194010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, nr_isolated); 1195397487dbSMel Gorman 1196c2ad7a1fSOscar Salvador cc->migrate_pfn = low_pfn; 1197c2ad7a1fSOscar Salvador 1198c2ad7a1fSOscar Salvador return ret; 11992fe86e00SMichal Nazarewicz } 12002fe86e00SMichal Nazarewicz 1201edc2ca61SVlastimil Babka /** 1202edc2ca61SVlastimil Babka * isolate_migratepages_range() - isolate migrate-able pages in a PFN range 1203edc2ca61SVlastimil Babka * @cc: Compaction control structure. 1204edc2ca61SVlastimil Babka * @start_pfn: The first PFN to start isolating. 1205edc2ca61SVlastimil Babka * @end_pfn: The one-past-last PFN. 1206edc2ca61SVlastimil Babka * 1207369fa227SOscar Salvador * Returns -EAGAIN when contended, -EINTR in case of a signal pending, -ENOMEM 1208369fa227SOscar Salvador * in case we could not allocate a page, or 0. 1209edc2ca61SVlastimil Babka */ 1210c2ad7a1fSOscar Salvador int 1211edc2ca61SVlastimil Babka isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, 1212edc2ca61SVlastimil Babka unsigned long end_pfn) 1213edc2ca61SVlastimil Babka { 1214e1409c32SJoonsoo Kim unsigned long pfn, block_start_pfn, block_end_pfn; 1215c2ad7a1fSOscar Salvador int ret = 0; 1216edc2ca61SVlastimil Babka 1217edc2ca61SVlastimil Babka /* Scan block by block.
First and last block may be incomplete */ 1218edc2ca61SVlastimil Babka pfn = start_pfn; 121906b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 1220e1409c32SJoonsoo Kim if (block_start_pfn < cc->zone->zone_start_pfn) 1221e1409c32SJoonsoo Kim block_start_pfn = cc->zone->zone_start_pfn; 122206b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 1223edc2ca61SVlastimil Babka 1224edc2ca61SVlastimil Babka for (; pfn < end_pfn; pfn = block_end_pfn, 1225e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1226edc2ca61SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 1227edc2ca61SVlastimil Babka 1228edc2ca61SVlastimil Babka block_end_pfn = min(block_end_pfn, end_pfn); 1229edc2ca61SVlastimil Babka 1230e1409c32SJoonsoo Kim if (!pageblock_pfn_to_page(block_start_pfn, 1231e1409c32SJoonsoo Kim block_end_pfn, cc->zone)) 1232edc2ca61SVlastimil Babka continue; 1233edc2ca61SVlastimil Babka 1234c2ad7a1fSOscar Salvador ret = isolate_migratepages_block(cc, pfn, block_end_pfn, 1235edc2ca61SVlastimil Babka ISOLATE_UNEVICTABLE); 1236edc2ca61SVlastimil Babka 1237c2ad7a1fSOscar Salvador if (ret) 1238edc2ca61SVlastimil Babka break; 12396ea41c0cSJoonsoo Kim 124038935861SZi Yan if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) 12416ea41c0cSJoonsoo Kim break; 1242edc2ca61SVlastimil Babka } 1243edc2ca61SVlastimil Babka 1244c2ad7a1fSOscar Salvador return ret; 1245edc2ca61SVlastimil Babka } 1246edc2ca61SVlastimil Babka 1247ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 1248ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 1249018e9a49SAndrew Morton 1250b682debdSVlastimil Babka static bool suitable_migration_source(struct compact_control *cc, 1251b682debdSVlastimil Babka struct page *page) 1252b682debdSVlastimil Babka { 1253282722b0SVlastimil Babka int block_mt; 1254282722b0SVlastimil Babka 12559bebefd5SMel Gorman if (pageblock_skip_persistent(page)) 12569bebefd5SMel Gorman return false; 12579bebefd5SMel Gorman 1258282722b0SVlastimil Babka if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) 1259b682debdSVlastimil Babka return true; 1260b682debdSVlastimil Babka 1261282722b0SVlastimil Babka block_mt = get_pageblock_migratetype(page); 1262282722b0SVlastimil Babka 1263282722b0SVlastimil Babka if (cc->migratetype == MIGRATE_MOVABLE) 1264282722b0SVlastimil Babka return is_migrate_movable(block_mt); 1265282722b0SVlastimil Babka else 1266282722b0SVlastimil Babka return block_mt == cc->migratetype; 1267b682debdSVlastimil Babka } 1268b682debdSVlastimil Babka 1269018e9a49SAndrew Morton /* Returns true if the page is within a block suitable for migration to */ 12709f7e3387SVlastimil Babka static bool suitable_migration_target(struct compact_control *cc, 12719f7e3387SVlastimil Babka struct page *page) 1272018e9a49SAndrew Morton { 1273018e9a49SAndrew Morton /* If the page is a large free page, then disallow migration */ 1274018e9a49SAndrew Morton if (PageBuddy(page)) { 1275018e9a49SAndrew Morton /* 1276018e9a49SAndrew Morton * We are checking page_order without zone->lock taken. But 1277018e9a49SAndrew Morton * the only small danger is that we skip a potentially suitable 1278018e9a49SAndrew Morton * pageblock, so it's not worth to check order for valid range. 
1279018e9a49SAndrew Morton */ 1280ab130f91SMatthew Wilcox (Oracle) if (buddy_order_unsafe(page) >= pageblock_order) 1281018e9a49SAndrew Morton return false; 1282018e9a49SAndrew Morton } 1283018e9a49SAndrew Morton 12841ef36db2SYisheng Xie if (cc->ignore_block_suitable) 12851ef36db2SYisheng Xie return true; 12861ef36db2SYisheng Xie 1287018e9a49SAndrew Morton /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 1288b682debdSVlastimil Babka if (is_migrate_movable(get_pageblock_migratetype(page))) 1289018e9a49SAndrew Morton return true; 1290018e9a49SAndrew Morton 1291018e9a49SAndrew Morton /* Otherwise skip the block */ 1292018e9a49SAndrew Morton return false; 1293018e9a49SAndrew Morton } 1294018e9a49SAndrew Morton 129570b44595SMel Gorman static inline unsigned int 129670b44595SMel Gorman freelist_scan_limit(struct compact_control *cc) 129770b44595SMel Gorman { 1298dd7ef7bdSQian Cai unsigned short shift = BITS_PER_LONG - 1; 1299dd7ef7bdSQian Cai 1300dd7ef7bdSQian Cai return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; 130170b44595SMel Gorman } 130270b44595SMel Gorman 1303ff9543fdSMichal Nazarewicz /* 1304f2849aa0SVlastimil Babka * Test whether the free scanner has reached the same or lower pageblock than 1305f2849aa0SVlastimil Babka * the migration scanner, and compaction should thus terminate. 1306f2849aa0SVlastimil Babka */ 1307f2849aa0SVlastimil Babka static inline bool compact_scanners_met(struct compact_control *cc) 1308f2849aa0SVlastimil Babka { 1309f2849aa0SVlastimil Babka return (cc->free_pfn >> pageblock_order) 1310f2849aa0SVlastimil Babka <= (cc->migrate_pfn >> pageblock_order); 1311f2849aa0SVlastimil Babka } 1312f2849aa0SVlastimil Babka 13135a811889SMel Gorman /* 13145a811889SMel Gorman * Used when scanning for a suitable migration target which scans freelists 13155a811889SMel Gorman * in reverse. Reorders the list such as the unscanned pages are scanned 13165a811889SMel Gorman * first on the next iteration of the free scanner 13175a811889SMel Gorman */ 13185a811889SMel Gorman static void 13195a811889SMel Gorman move_freelist_head(struct list_head *freelist, struct page *freepage) 13205a811889SMel Gorman { 13215a811889SMel Gorman LIST_HEAD(sublist); 13225a811889SMel Gorman 13235a811889SMel Gorman if (!list_is_last(freelist, &freepage->lru)) { 13245a811889SMel Gorman list_cut_before(&sublist, freelist, &freepage->lru); 13255a811889SMel Gorman list_splice_tail(&sublist, freelist); 13265a811889SMel Gorman } 13275a811889SMel Gorman } 13285a811889SMel Gorman 13295a811889SMel Gorman /* 13305a811889SMel Gorman * Similar to move_freelist_head except used by the migration scanner 13315a811889SMel Gorman * when scanning forward. It's possible for these list operations to 13325a811889SMel Gorman * move against each other if they search the free list exactly in 13335a811889SMel Gorman * lockstep. 
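 *
 * Concretely, the entries from the list head up to and including
 * @freepage are cut off and spliced to the tail: for a list A-B-C-D
 * with @freepage == B the result is C-D-A-B, so the next forward walk
 * resumes at C.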
13345a811889SMel Gorman */ 133570b44595SMel Gorman static void 133670b44595SMel Gorman move_freelist_tail(struct list_head *freelist, struct page *freepage) 133770b44595SMel Gorman { 133870b44595SMel Gorman LIST_HEAD(sublist); 133970b44595SMel Gorman 134070b44595SMel Gorman if (!list_is_first(freelist, &freepage->lru)) { 134170b44595SMel Gorman list_cut_position(&sublist, freelist, &freepage->lru); 134270b44595SMel Gorman list_splice_tail(&sublist, freelist); 134370b44595SMel Gorman } 134470b44595SMel Gorman } 134570b44595SMel Gorman 13465a811889SMel Gorman static void 13475a811889SMel Gorman fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated) 13485a811889SMel Gorman { 13495a811889SMel Gorman unsigned long start_pfn, end_pfn; 13506e2b7044SVlastimil Babka struct page *page; 13515a811889SMel Gorman 13525a811889SMel Gorman /* Do not search around if there are enough pages already */ 13535a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) 13545a811889SMel Gorman return; 13555a811889SMel Gorman 13565a811889SMel Gorman /* Minimise scanning during async compaction */ 13575a811889SMel Gorman if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) 13585a811889SMel Gorman return; 13595a811889SMel Gorman 13605a811889SMel Gorman /* Pageblock boundaries */ 13616e2b7044SVlastimil Babka start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); 13626e2b7044SVlastimil Babka end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); 13636e2b7044SVlastimil Babka 13646e2b7044SVlastimil Babka page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); 13656e2b7044SVlastimil Babka if (!page) 13666e2b7044SVlastimil Babka return; 13675a811889SMel Gorman 13685a811889SMel Gorman /* Scan before */ 13695a811889SMel Gorman if (start_pfn != pfn) { 13704fca9730SMel Gorman isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false); 13715a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) 13725a811889SMel Gorman return; 13735a811889SMel Gorman } 13745a811889SMel Gorman 13755a811889SMel Gorman /* Scan after */ 13765a811889SMel Gorman start_pfn = pfn + nr_isolated; 137760fce36aSMel Gorman if (start_pfn < end_pfn) 13784fca9730SMel Gorman isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); 13795a811889SMel Gorman 13805a811889SMel Gorman /* Skip this pageblock in the future as it's full or nearly full */ 13815a811889SMel Gorman if (cc->nr_freepages < cc->nr_migratepages) 13825a811889SMel Gorman set_pageblock_skip(page); 13835a811889SMel Gorman } 13845a811889SMel Gorman 1385dbe2d4e4SMel Gorman /* Search orders in round-robin fashion */ 1386dbe2d4e4SMel Gorman static int next_search_order(struct compact_control *cc, int order) 1387dbe2d4e4SMel Gorman { 1388dbe2d4e4SMel Gorman order--; 1389dbe2d4e4SMel Gorman if (order < 0) 1390dbe2d4e4SMel Gorman order = cc->order - 1; 1391dbe2d4e4SMel Gorman 1392dbe2d4e4SMel Gorman /* Search wrapped around? 
*/ 1393dbe2d4e4SMel Gorman if (order == cc->search_order) { 1394dbe2d4e4SMel Gorman cc->search_order--; 1395dbe2d4e4SMel Gorman if (cc->search_order < 0) 1396dbe2d4e4SMel Gorman cc->search_order = cc->order - 1; 1397dbe2d4e4SMel Gorman return -1; 1398dbe2d4e4SMel Gorman } 1399dbe2d4e4SMel Gorman 1400dbe2d4e4SMel Gorman return order; 1401dbe2d4e4SMel Gorman } 1402dbe2d4e4SMel Gorman 14035a811889SMel Gorman static unsigned long 14045a811889SMel Gorman fast_isolate_freepages(struct compact_control *cc) 14055a811889SMel Gorman { 1406b55ca526SWonhyuk Yang unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); 14075a811889SMel Gorman unsigned int nr_scanned = 0; 140874e21484SRokudo Yan unsigned long low_pfn, min_pfn, highest = 0; 14095a811889SMel Gorman unsigned long nr_isolated = 0; 14105a811889SMel Gorman unsigned long distance; 14115a811889SMel Gorman struct page *page = NULL; 14125a811889SMel Gorman bool scan_start = false; 14135a811889SMel Gorman int order; 14145a811889SMel Gorman 14155a811889SMel Gorman /* Full compaction passes in a negative order */ 14165a811889SMel Gorman if (cc->order <= 0) 14175a811889SMel Gorman return cc->free_pfn; 14185a811889SMel Gorman 14195a811889SMel Gorman /* 14205a811889SMel Gorman * If starting the scan, use a deeper search and use the highest 14215a811889SMel Gorman * PFN found if a suitable one is not found. 14225a811889SMel Gorman */ 1423e332f741SMel Gorman if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { 14245a811889SMel Gorman limit = pageblock_nr_pages >> 1; 14255a811889SMel Gorman scan_start = true; 14265a811889SMel Gorman } 14275a811889SMel Gorman 14285a811889SMel Gorman /* 14295a811889SMel Gorman * Preferred point is in the top quarter of the scan space but take 14305a811889SMel Gorman * a pfn from the top half if the search is problematic. 
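 *
 * For example, if the scanners are 1024 pageblocks apart, low_pfn ends
 * up roughly 256 pageblocks below free_pfn and min_pfn roughly 512
 * pageblocks below it, both rounded down to a pageblock boundary.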
14315a811889SMel Gorman */ 14325a811889SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn); 14335a811889SMel Gorman low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); 14345a811889SMel Gorman min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); 14355a811889SMel Gorman 14365a811889SMel Gorman if (WARN_ON_ONCE(min_pfn > low_pfn)) 14375a811889SMel Gorman low_pfn = min_pfn; 14385a811889SMel Gorman 1439dbe2d4e4SMel Gorman /* 1440dbe2d4e4SMel Gorman * Search starts from the last successful isolation order or the next 1441dbe2d4e4SMel Gorman * order to search after a previous failure 1442dbe2d4e4SMel Gorman */ 1443dbe2d4e4SMel Gorman cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); 1444dbe2d4e4SMel Gorman 1445dbe2d4e4SMel Gorman for (order = cc->search_order; 1446dbe2d4e4SMel Gorman !page && order >= 0; 1447dbe2d4e4SMel Gorman order = next_search_order(cc, order)) { 14485a811889SMel Gorman struct free_area *area = &cc->zone->free_area[order]; 14495a811889SMel Gorman struct list_head *freelist; 14505a811889SMel Gorman struct page *freepage; 14515a811889SMel Gorman unsigned long flags; 14525a811889SMel Gorman unsigned int order_scanned = 0; 145374e21484SRokudo Yan unsigned long high_pfn = 0; 14545a811889SMel Gorman 14555a811889SMel Gorman if (!area->nr_free) 14565a811889SMel Gorman continue; 14575a811889SMel Gorman 14585a811889SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags); 14595a811889SMel Gorman freelist = &area->free_list[MIGRATE_MOVABLE]; 14605a811889SMel Gorman list_for_each_entry_reverse(freepage, freelist, lru) { 14615a811889SMel Gorman unsigned long pfn; 14625a811889SMel Gorman 14635a811889SMel Gorman order_scanned++; 14645a811889SMel Gorman nr_scanned++; 14655a811889SMel Gorman pfn = page_to_pfn(freepage); 14665a811889SMel Gorman 14675a811889SMel Gorman if (pfn >= highest) 14686e2b7044SVlastimil Babka highest = max(pageblock_start_pfn(pfn), 14696e2b7044SVlastimil Babka cc->zone->zone_start_pfn); 14705a811889SMel Gorman 14715a811889SMel Gorman if (pfn >= low_pfn) { 14725a811889SMel Gorman cc->fast_search_fail = 0; 1473dbe2d4e4SMel Gorman cc->search_order = order; 14745a811889SMel Gorman page = freepage; 14755a811889SMel Gorman break; 14765a811889SMel Gorman } 14775a811889SMel Gorman 14785a811889SMel Gorman if (pfn >= min_pfn && pfn > high_pfn) { 14795a811889SMel Gorman high_pfn = pfn; 14805a811889SMel Gorman 14815a811889SMel Gorman /* Shorten the scan if a candidate is found */ 14825a811889SMel Gorman limit >>= 1; 14835a811889SMel Gorman } 14845a811889SMel Gorman 14855a811889SMel Gorman if (order_scanned >= limit) 14865a811889SMel Gorman break; 14875a811889SMel Gorman } 14885a811889SMel Gorman 14895a811889SMel Gorman /* Use a minimum pfn if a preferred one was not found */ 14905a811889SMel Gorman if (!page && high_pfn) { 14915a811889SMel Gorman page = pfn_to_page(high_pfn); 14925a811889SMel Gorman 14935a811889SMel Gorman /* Update freepage for the list reorder below */ 14945a811889SMel Gorman freepage = page; 14955a811889SMel Gorman } 14965a811889SMel Gorman 14975a811889SMel Gorman /* Reorder to so a future search skips recent pages */ 14985a811889SMel Gorman move_freelist_head(freelist, freepage); 14995a811889SMel Gorman 15005a811889SMel Gorman /* Isolate the page if available */ 15015a811889SMel Gorman if (page) { 15025a811889SMel Gorman if (__isolate_free_page(page, order)) { 15035a811889SMel Gorman set_page_private(page, order); 15045a811889SMel Gorman nr_isolated = 1 << order; 1505b717d6b9SWilliam Lam nr_scanned += nr_isolated - 
1; 15065a811889SMel Gorman cc->nr_freepages += nr_isolated; 15075a811889SMel Gorman list_add_tail(&page->lru, &cc->freepages); 15085a811889SMel Gorman count_compact_events(COMPACTISOLATED, nr_isolated); 15095a811889SMel Gorman } else { 15105a811889SMel Gorman /* If isolation fails, abort the search */ 15115b56d996SQian Cai order = cc->search_order + 1; 15125a811889SMel Gorman page = NULL; 15135a811889SMel Gorman } 15145a811889SMel Gorman } 15155a811889SMel Gorman 15165a811889SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 15175a811889SMel Gorman 15185a811889SMel Gorman /* 1519b55ca526SWonhyuk Yang * Smaller scan on next order so the total scan is related 15205a811889SMel Gorman * to freelist_scan_limit. 15215a811889SMel Gorman */ 15225a811889SMel Gorman if (order_scanned >= limit) 1523b55ca526SWonhyuk Yang limit = max(1U, limit >> 1); 15245a811889SMel Gorman } 15255a811889SMel Gorman 15265a811889SMel Gorman if (!page) { 15275a811889SMel Gorman cc->fast_search_fail++; 15285a811889SMel Gorman if (scan_start) { 15295a811889SMel Gorman /* 15305a811889SMel Gorman * Use the highest PFN found above min. If one was 1531f3867755SEthon Paul * not found, be pessimistic for direct compaction 15325a811889SMel Gorman * and use the min mark. 15335a811889SMel Gorman */ 1534ca2864e5SMiaohe Lin if (highest >= min_pfn) { 15355a811889SMel Gorman page = pfn_to_page(highest); 15365a811889SMel Gorman cc->free_pfn = highest; 15375a811889SMel Gorman } else { 1538e577c8b6SSuzuki K Poulose if (cc->direct_compaction && pfn_valid(min_pfn)) { 153973a6e474SBaoquan He page = pageblock_pfn_to_page(min_pfn, 15406e2b7044SVlastimil Babka min(pageblock_end_pfn(min_pfn), 15416e2b7044SVlastimil Babka zone_end_pfn(cc->zone)), 154273a6e474SBaoquan He cc->zone); 15435a811889SMel Gorman cc->free_pfn = min_pfn; 15445a811889SMel Gorman } 15455a811889SMel Gorman } 15465a811889SMel Gorman } 15475a811889SMel Gorman } 15485a811889SMel Gorman 1549d097a6f6SMel Gorman if (highest && highest >= cc->zone->compact_cached_free_pfn) { 1550d097a6f6SMel Gorman highest -= pageblock_nr_pages; 15515a811889SMel Gorman cc->zone->compact_cached_free_pfn = highest; 1552d097a6f6SMel Gorman } 15535a811889SMel Gorman 15545a811889SMel Gorman cc->total_free_scanned += nr_scanned; 15555a811889SMel Gorman if (!page) 15565a811889SMel Gorman return cc->free_pfn; 15575a811889SMel Gorman 15585a811889SMel Gorman low_pfn = page_to_pfn(page); 15595a811889SMel Gorman fast_isolate_around(cc, low_pfn, nr_isolated); 15605a811889SMel Gorman return low_pfn; 15615a811889SMel Gorman } 15625a811889SMel Gorman 1563f2849aa0SVlastimil Babka /* 1564ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 1565ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 
1566ff9543fdSMichal Nazarewicz */ 1567edc2ca61SVlastimil Babka static void isolate_freepages(struct compact_control *cc) 1568ff9543fdSMichal Nazarewicz { 1569edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 1570ff9543fdSMichal Nazarewicz struct page *page; 1571c96b9e50SVlastimil Babka unsigned long block_start_pfn; /* start of current pageblock */ 1572e14c720eSVlastimil Babka unsigned long isolate_start_pfn; /* exact pfn we start at */ 1573c96b9e50SVlastimil Babka unsigned long block_end_pfn; /* end of current pageblock */ 1574c96b9e50SVlastimil Babka unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 1575ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 15764fca9730SMel Gorman unsigned int stride; 15772fe86e00SMichal Nazarewicz 15785a811889SMel Gorman /* Try a small search of the free lists for a candidate */ 157900bc102fSMiaohe Lin fast_isolate_freepages(cc); 15805a811889SMel Gorman if (cc->nr_freepages) 15815a811889SMel Gorman goto splitmap; 15825a811889SMel Gorman 1583ff9543fdSMichal Nazarewicz /* 1584ff9543fdSMichal Nazarewicz * Initialise the free scanner. The starting point is where we last 158549e068f0SVlastimil Babka * successfully isolated from, zone-cached value, or the end of the 1586e14c720eSVlastimil Babka * zone when isolating for the first time. For looping we also need 1587e14c720eSVlastimil Babka * this pfn aligned down to the pageblock boundary, because we do 1588c96b9e50SVlastimil Babka * block_start_pfn -= pageblock_nr_pages in the for loop. 1589c96b9e50SVlastimil Babka * For ending point, take care when isolating in last pageblock of a 1590a1c1dbebSRandy Dunlap * zone which ends in the middle of a pageblock. 159149e068f0SVlastimil Babka * The low boundary is the end of the pageblock the migration scanner 159249e068f0SVlastimil Babka * is using. 1593ff9543fdSMichal Nazarewicz */ 1594e14c720eSVlastimil Babka isolate_start_pfn = cc->free_pfn; 15955a811889SMel Gorman block_start_pfn = pageblock_start_pfn(isolate_start_pfn); 1596c96b9e50SVlastimil Babka block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 1597c96b9e50SVlastimil Babka zone_end_pfn(zone)); 159806b6640aSVlastimil Babka low_pfn = pageblock_end_pfn(cc->migrate_pfn); 15994fca9730SMel Gorman stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; 16002fe86e00SMichal Nazarewicz 1601ff9543fdSMichal Nazarewicz /* 1602ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 1603ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 1604ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 1605ff9543fdSMichal Nazarewicz */ 1606f5f61a32SVlastimil Babka for (; block_start_pfn >= low_pfn; 1607c96b9e50SVlastimil Babka block_end_pfn = block_start_pfn, 1608e14c720eSVlastimil Babka block_start_pfn -= pageblock_nr_pages, 1609e14c720eSVlastimil Babka isolate_start_pfn = block_start_pfn) { 16104fca9730SMel Gorman unsigned long nr_isolated; 16114fca9730SMel Gorman 1612f6ea3adbSDavid Rientjes /* 1613f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 1614cb810ad2SMel Gorman * suitable migration targets, so periodically check resched. 
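 *
 * The check below fires once every COMPACT_CLUSTER_MAX (32) pageblocks
 * of scanning.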
1615f6ea3adbSDavid Rientjes */ 1616c036ddffSMiaohe Lin if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages))) 1617cf66f070SMel Gorman cond_resched(); 1618f6ea3adbSDavid Rientjes 16197d49d886SVlastimil Babka page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 16207d49d886SVlastimil Babka zone); 16217d49d886SVlastimil Babka if (!page) 1622ff9543fdSMichal Nazarewicz continue; 1623ff9543fdSMichal Nazarewicz 1624ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 16259f7e3387SVlastimil Babka if (!suitable_migration_target(cc, page)) 1626ff9543fdSMichal Nazarewicz continue; 162768e3e926SLinus Torvalds 1628bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 1629bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 1630bb13ffebSMel Gorman continue; 1631bb13ffebSMel Gorman 1632e14c720eSVlastimil Babka /* Found a block suitable for isolating free pages from. */ 16334fca9730SMel Gorman nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, 16344fca9730SMel Gorman block_end_pfn, freelist, stride, false); 1635ff9543fdSMichal Nazarewicz 1636d097a6f6SMel Gorman /* Update the skip hint if the full pageblock was scanned */ 1637d097a6f6SMel Gorman if (isolate_start_pfn == block_end_pfn) 1638d097a6f6SMel Gorman update_pageblock_skip(cc, page, block_start_pfn); 1639d097a6f6SMel Gorman 1640cb2dcaf0SMel Gorman /* Are enough freepages isolated? */ 1641cb2dcaf0SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) { 1642a46cbf3bSDavid Rientjes if (isolate_start_pfn >= block_end_pfn) { 1643a46cbf3bSDavid Rientjes /* 1644a46cbf3bSDavid Rientjes * Restart at previous pageblock if more 1645a46cbf3bSDavid Rientjes * freepages can be isolated next time. 1646a46cbf3bSDavid Rientjes */ 1647f5f61a32SVlastimil Babka isolate_start_pfn = 1648e14c720eSVlastimil Babka block_start_pfn - pageblock_nr_pages; 1649a46cbf3bSDavid Rientjes } 1650be976572SVlastimil Babka break; 1651a46cbf3bSDavid Rientjes } else if (isolate_start_pfn < block_end_pfn) { 1652f5f61a32SVlastimil Babka /* 1653a46cbf3bSDavid Rientjes * If isolation failed early, do not continue 1654a46cbf3bSDavid Rientjes * needlessly. 1655f5f61a32SVlastimil Babka */ 1656a46cbf3bSDavid Rientjes break; 1657f5f61a32SVlastimil Babka } 16584fca9730SMel Gorman 16594fca9730SMel Gorman /* Adjust stride depending on isolation */ 16604fca9730SMel Gorman if (nr_isolated) { 16614fca9730SMel Gorman stride = 1; 16624fca9730SMel Gorman continue; 16634fca9730SMel Gorman } 16644fca9730SMel Gorman stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1); 1665c89511abSMel Gorman } 1666ff9543fdSMichal Nazarewicz 16677ed695e0SVlastimil Babka /* 1668f5f61a32SVlastimil Babka * Record where the free scanner will restart next time. 
Either we 1669f5f61a32SVlastimil Babka * broke from the loop and set isolate_start_pfn based on the last 1670f5f61a32SVlastimil Babka * call to isolate_freepages_block(), or we met the migration scanner 1671f5f61a32SVlastimil Babka * and the loop terminated due to isolate_start_pfn < low_pfn 16727ed695e0SVlastimil Babka */ 1673f5f61a32SVlastimil Babka cc->free_pfn = isolate_start_pfn; 16745a811889SMel Gorman 16755a811889SMel Gorman splitmap: 16765a811889SMel Gorman /* __isolate_free_page() does not map the pages */ 16775a811889SMel Gorman split_map_pages(freelist); 1678748446bbSMel Gorman } 1679748446bbSMel Gorman 1680748446bbSMel Gorman /* 1681748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 1682748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 1683748446bbSMel Gorman */ 1684748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 1685666feb21SMichal Hocko unsigned long data) 1686748446bbSMel Gorman { 1687748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 1688748446bbSMel Gorman struct page *freepage; 1689748446bbSMel Gorman 1690748446bbSMel Gorman if (list_empty(&cc->freepages)) { 1691edc2ca61SVlastimil Babka isolate_freepages(cc); 1692748446bbSMel Gorman 1693748446bbSMel Gorman if (list_empty(&cc->freepages)) 1694748446bbSMel Gorman return NULL; 1695748446bbSMel Gorman } 1696748446bbSMel Gorman 1697748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 1698748446bbSMel Gorman list_del(&freepage->lru); 1699748446bbSMel Gorman cc->nr_freepages--; 1700748446bbSMel Gorman 1701748446bbSMel Gorman return freepage; 1702748446bbSMel Gorman } 1703748446bbSMel Gorman 1704748446bbSMel Gorman /* 1705d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 1706d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 1707d53aea3dSDavid Rientjes * special handling needed for NUMA. 1708d53aea3dSDavid Rientjes */ 1709d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1710d53aea3dSDavid Rientjes { 1711d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1712d53aea3dSDavid Rientjes 1713d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1714d53aea3dSDavid Rientjes cc->nr_freepages++; 1715d53aea3dSDavid Rientjes } 1716d53aea3dSDavid Rientjes 1717ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1718ff9543fdSMichal Nazarewicz typedef enum { 1719ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1720ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1721ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1722ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1723ff9543fdSMichal Nazarewicz 1724ff9543fdSMichal Nazarewicz /* 17255bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for 17265bbe3547SEric B Munson * compactable pages. 
17275bbe3547SEric B Munson */ 17286923aa0dSSebastian Andrzej Siewior #ifdef CONFIG_PREEMPT_RT 17296923aa0dSSebastian Andrzej Siewior int sysctl_compact_unevictable_allowed __read_mostly = 0; 17306923aa0dSSebastian Andrzej Siewior #else 17315bbe3547SEric B Munson int sysctl_compact_unevictable_allowed __read_mostly = 1; 17326923aa0dSSebastian Andrzej Siewior #endif 17335bbe3547SEric B Munson 173470b44595SMel Gorman static inline void 173570b44595SMel Gorman update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) 173670b44595SMel Gorman { 173770b44595SMel Gorman if (cc->fast_start_pfn == ULONG_MAX) 173870b44595SMel Gorman return; 173970b44595SMel Gorman 174070b44595SMel Gorman if (!cc->fast_start_pfn) 174170b44595SMel Gorman cc->fast_start_pfn = pfn; 174270b44595SMel Gorman 174370b44595SMel Gorman cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); 174470b44595SMel Gorman } 174570b44595SMel Gorman 174670b44595SMel Gorman static inline unsigned long 174770b44595SMel Gorman reinit_migrate_pfn(struct compact_control *cc) 174870b44595SMel Gorman { 174970b44595SMel Gorman if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) 175070b44595SMel Gorman return cc->migrate_pfn; 175170b44595SMel Gorman 175270b44595SMel Gorman cc->migrate_pfn = cc->fast_start_pfn; 175370b44595SMel Gorman cc->fast_start_pfn = ULONG_MAX; 175470b44595SMel Gorman 175570b44595SMel Gorman return cc->migrate_pfn; 175670b44595SMel Gorman } 175770b44595SMel Gorman 175870b44595SMel Gorman /* 175970b44595SMel Gorman * Briefly search the free lists for a migration source that already has 176070b44595SMel Gorman * some free pages to reduce the number of pages that need migration 176170b44595SMel Gorman * before a pageblock is free. 176270b44595SMel Gorman */ 176370b44595SMel Gorman static unsigned long fast_find_migrateblock(struct compact_control *cc) 176470b44595SMel Gorman { 176570b44595SMel Gorman unsigned int limit = freelist_scan_limit(cc); 176670b44595SMel Gorman unsigned int nr_scanned = 0; 176770b44595SMel Gorman unsigned long distance; 176870b44595SMel Gorman unsigned long pfn = cc->migrate_pfn; 176970b44595SMel Gorman unsigned long high_pfn; 177070b44595SMel Gorman int order; 177115d28d0dSWonhyuk Yang bool found_block = false; 177270b44595SMel Gorman 177370b44595SMel Gorman /* Skip hints are relied on to avoid repeats on the fast search */ 177470b44595SMel Gorman if (cc->ignore_skip_hint) 177570b44595SMel Gorman return pfn; 177670b44595SMel Gorman 177770b44595SMel Gorman /* 177870b44595SMel Gorman * If the migrate_pfn is not at the start of a zone or the start 177970b44595SMel Gorman * of a pageblock then assume this is a continuation of a previous 178070b44595SMel Gorman * scan restarted due to COMPACT_CLUSTER_MAX. 178170b44595SMel Gorman */ 178270b44595SMel Gorman if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) 178370b44595SMel Gorman return pfn; 178470b44595SMel Gorman 178570b44595SMel Gorman /* 178670b44595SMel Gorman * For smaller orders, just linearly scan as the number of pages 178770b44595SMel Gorman * to migrate should be relatively small and does not necessarily 178870b44595SMel Gorman * justify freeing up a large block for a small allocation. 
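 *
 * PAGE_ALLOC_COSTLY_ORDER is 3, so requests for eight pages or fewer
 * return here and rely on the caller's linear pageblock scan instead.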
178970b44595SMel Gorman */ 179070b44595SMel Gorman if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) 179170b44595SMel Gorman return pfn; 179270b44595SMel Gorman 179370b44595SMel Gorman /* 179470b44595SMel Gorman * Only allow kcompactd and direct requests for movable pages to 179570b44595SMel Gorman * quickly clear out a MOVABLE pageblock for allocation. This 179670b44595SMel Gorman * reduces the risk that a large movable pageblock is freed for 179770b44595SMel Gorman * an unmovable/reclaimable small allocation. 179870b44595SMel Gorman */ 179970b44595SMel Gorman if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) 180070b44595SMel Gorman return pfn; 180170b44595SMel Gorman 180270b44595SMel Gorman /* 180370b44595SMel Gorman * When starting the migration scanner, pick any pageblock within the 180470b44595SMel Gorman * first half of the search space. Otherwise try and pick a pageblock 180570b44595SMel Gorman * within the first eighth to reduce the chances that a migration 180670b44595SMel Gorman * target later becomes a source. 180770b44595SMel Gorman */ 180870b44595SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn) >> 1; 180970b44595SMel Gorman if (cc->migrate_pfn != cc->zone->zone_start_pfn) 181070b44595SMel Gorman distance >>= 2; 181170b44595SMel Gorman high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); 181270b44595SMel Gorman 181370b44595SMel Gorman for (order = cc->order - 1; 181415d28d0dSWonhyuk Yang order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; 181570b44595SMel Gorman order--) { 181670b44595SMel Gorman struct free_area *area = &cc->zone->free_area[order]; 181770b44595SMel Gorman struct list_head *freelist; 181870b44595SMel Gorman unsigned long flags; 181970b44595SMel Gorman struct page *freepage; 182070b44595SMel Gorman 182170b44595SMel Gorman if (!area->nr_free) 182270b44595SMel Gorman continue; 182370b44595SMel Gorman 182470b44595SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags); 182570b44595SMel Gorman freelist = &area->free_list[MIGRATE_MOVABLE]; 182670b44595SMel Gorman list_for_each_entry(freepage, freelist, lru) { 182770b44595SMel Gorman unsigned long free_pfn; 182870b44595SMel Gorman 182915d28d0dSWonhyuk Yang if (nr_scanned++ >= limit) { 183015d28d0dSWonhyuk Yang move_freelist_tail(freelist, freepage); 183115d28d0dSWonhyuk Yang break; 183215d28d0dSWonhyuk Yang } 183315d28d0dSWonhyuk Yang 183470b44595SMel Gorman free_pfn = page_to_pfn(freepage); 183570b44595SMel Gorman if (free_pfn < high_pfn) { 183670b44595SMel Gorman /* 183770b44595SMel Gorman * Avoid if skipped recently. Ideally it would 183870b44595SMel Gorman * move to the tail but even safe iteration of 183970b44595SMel Gorman * the list assumes an entry is deleted, not 184070b44595SMel Gorman * reordered. 
184170b44595SMel Gorman */ 184215d28d0dSWonhyuk Yang if (get_pageblock_skip(freepage)) 184370b44595SMel Gorman continue; 184470b44595SMel Gorman 184570b44595SMel Gorman /* Reorder to so a future search skips recent pages */ 184670b44595SMel Gorman move_freelist_tail(freelist, freepage); 184770b44595SMel Gorman 1848e380bebeSMel Gorman update_fast_start_pfn(cc, free_pfn); 184970b44595SMel Gorman pfn = pageblock_start_pfn(free_pfn); 1850bbe832b9SRei Yamamoto if (pfn < cc->zone->zone_start_pfn) 1851bbe832b9SRei Yamamoto pfn = cc->zone->zone_start_pfn; 185270b44595SMel Gorman cc->fast_search_fail = 0; 185315d28d0dSWonhyuk Yang found_block = true; 185470b44595SMel Gorman set_pageblock_skip(freepage); 185570b44595SMel Gorman break; 185670b44595SMel Gorman } 185770b44595SMel Gorman } 185870b44595SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 185970b44595SMel Gorman } 186070b44595SMel Gorman 186170b44595SMel Gorman cc->total_migrate_scanned += nr_scanned; 186270b44595SMel Gorman 186370b44595SMel Gorman /* 186470b44595SMel Gorman * If fast scanning failed then use a cached entry for a page block 186570b44595SMel Gorman * that had free pages as the basis for starting a linear scan. 186670b44595SMel Gorman */ 186715d28d0dSWonhyuk Yang if (!found_block) { 186815d28d0dSWonhyuk Yang cc->fast_search_fail++; 186970b44595SMel Gorman pfn = reinit_migrate_pfn(cc); 187015d28d0dSWonhyuk Yang } 187170b44595SMel Gorman return pfn; 187270b44595SMel Gorman } 187370b44595SMel Gorman 18745bbe3547SEric B Munson /* 1875edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1876edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1877edc2ca61SVlastimil Babka * compact_control. 1878ff9543fdSMichal Nazarewicz */ 187932aaf055SPengfei Li static isolate_migrate_t isolate_migratepages(struct compact_control *cc) 1880ff9543fdSMichal Nazarewicz { 1881e1409c32SJoonsoo Kim unsigned long block_start_pfn; 1882e1409c32SJoonsoo Kim unsigned long block_end_pfn; 1883e1409c32SJoonsoo Kim unsigned long low_pfn; 1884edc2ca61SVlastimil Babka struct page *page; 1885edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 18865bbe3547SEric B Munson (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 18871d2047feSHugh Dickins (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); 188870b44595SMel Gorman bool fast_find_block; 1889ff9543fdSMichal Nazarewicz 1890edc2ca61SVlastimil Babka /* 1891edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 189270b44595SMel Gorman * initialized by compact_zone(). The first failure will use 189370b44595SMel Gorman * the lowest PFN as the starting point for linear scanning. 1894edc2ca61SVlastimil Babka */ 189570b44595SMel Gorman low_pfn = fast_find_migrateblock(cc); 189606b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(low_pfn); 189732aaf055SPengfei Li if (block_start_pfn < cc->zone->zone_start_pfn) 189832aaf055SPengfei Li block_start_pfn = cc->zone->zone_start_pfn; 1899ff9543fdSMichal Nazarewicz 190070b44595SMel Gorman /* 190170b44595SMel Gorman * fast_find_migrateblock marks a pageblock skipped so to avoid 190270b44595SMel Gorman * the isolation_suitable check below, check whether the fast 190370b44595SMel Gorman * search was successful. 
190470b44595SMel Gorman */ 190570b44595SMel Gorman fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; 190670b44595SMel Gorman 1907ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 190806b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(low_pfn); 1909ff9543fdSMichal Nazarewicz 1910edc2ca61SVlastimil Babka /* 1911edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1912edc2ca61SVlastimil Babka * Do not cross the free scanner. 1913edc2ca61SVlastimil Babka */ 1914e1409c32SJoonsoo Kim for (; block_end_pfn <= cc->free_pfn; 191570b44595SMel Gorman fast_find_block = false, 1916c2ad7a1fSOscar Salvador cc->migrate_pfn = low_pfn = block_end_pfn, 1917e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1918e1409c32SJoonsoo Kim block_end_pfn += pageblock_nr_pages) { 1919edc2ca61SVlastimil Babka 1920edc2ca61SVlastimil Babka /* 1921edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1922edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1923cb810ad2SMel Gorman * need to schedule. 1924edc2ca61SVlastimil Babka */ 1925c036ddffSMiaohe Lin if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages))) 1926cf66f070SMel Gorman cond_resched(); 1927edc2ca61SVlastimil Babka 192832aaf055SPengfei Li page = pageblock_pfn_to_page(block_start_pfn, 192932aaf055SPengfei Li block_end_pfn, cc->zone); 19307d49d886SVlastimil Babka if (!page) 1931edc2ca61SVlastimil Babka continue; 1932edc2ca61SVlastimil Babka 1933e380bebeSMel Gorman /* 1934e380bebeSMel Gorman * If isolation recently failed, do not retry. Only check the 1935e380bebeSMel Gorman * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock 1936e380bebeSMel Gorman * to be visited multiple times. Assume skip was checked 1937e380bebeSMel Gorman * before making it "skip" so other compaction instances do 1938e380bebeSMel Gorman * not scan the same block. 1939e380bebeSMel Gorman */ 1940*ee0913c4SKefeng Wang if (pageblock_aligned(low_pfn) && 1941e380bebeSMel Gorman !fast_find_block && !isolation_suitable(cc, page)) 1942edc2ca61SVlastimil Babka continue; 1943edc2ca61SVlastimil Babka 1944edc2ca61SVlastimil Babka /* 1945556162bfSMiaohe Lin * For async direct compaction, only scan the pageblocks of the 1946556162bfSMiaohe Lin * same migratetype without huge pages. Async direct compaction 1947556162bfSMiaohe Lin * is optimistic to see if the minimum amount of work satisfies 1948556162bfSMiaohe Lin * the allocation. The cached PFN is updated as it's possible 1949556162bfSMiaohe Lin * that all remaining blocks between source and target are 1950556162bfSMiaohe Lin * unsuitable and the compaction scanners fail to meet. 1951edc2ca61SVlastimil Babka */ 19529bebefd5SMel Gorman if (!suitable_migration_source(cc, page)) { 19539bebefd5SMel Gorman update_cached_migrate(cc, block_end_pfn); 1954edc2ca61SVlastimil Babka continue; 19559bebefd5SMel Gorman } 1956ff9543fdSMichal Nazarewicz 1957ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1958c2ad7a1fSOscar Salvador if (isolate_migratepages_block(cc, low_pfn, block_end_pfn, 1959c2ad7a1fSOscar Salvador isolate_mode)) 1960ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1961ff9543fdSMichal Nazarewicz 1962edc2ca61SVlastimil Babka /* 1963edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 1964edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 1965edc2ca61SVlastimil Babka * continue or not. 
1966edc2ca61SVlastimil Babka */ 1967edc2ca61SVlastimil Babka break; 1968edc2ca61SVlastimil Babka } 1969edc2ca61SVlastimil Babka 1970edc2ca61SVlastimil Babka return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1971ff9543fdSMichal Nazarewicz } 1972ff9543fdSMichal Nazarewicz 197321c527a3SYaowei Bai /* 197421c527a3SYaowei Bai * order == -1 is expected when compacting via 197521c527a3SYaowei Bai * /proc/sys/vm/compact_memory 197621c527a3SYaowei Bai */ 197721c527a3SYaowei Bai static inline bool is_via_compact_memory(int order) 197821c527a3SYaowei Bai { 197921c527a3SYaowei Bai return order == -1; 198021c527a3SYaowei Bai } 198121c527a3SYaowei Bai 1982b4a0215eSKefeng Wang /* 1983b4a0215eSKefeng Wang * Determine whether kswapd is (or recently was!) running on this node. 1984b4a0215eSKefeng Wang * 1985b4a0215eSKefeng Wang * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't 1986b4a0215eSKefeng Wang * zero it. 1987b4a0215eSKefeng Wang */ 1988facdaa91SNitin Gupta static bool kswapd_is_running(pg_data_t *pgdat) 1989facdaa91SNitin Gupta { 1990b4a0215eSKefeng Wang bool running; 1991b4a0215eSKefeng Wang 1992b4a0215eSKefeng Wang pgdat_kswapd_lock(pgdat); 1993b4a0215eSKefeng Wang running = pgdat->kswapd && task_is_running(pgdat->kswapd); 1994b4a0215eSKefeng Wang pgdat_kswapd_unlock(pgdat); 1995b4a0215eSKefeng Wang 1996b4a0215eSKefeng Wang return running; 1997facdaa91SNitin Gupta } 1998facdaa91SNitin Gupta 1999facdaa91SNitin Gupta /* 2000facdaa91SNitin Gupta * A zone's fragmentation score is the external fragmentation wrt to the 200140d7e203SCharan Teja Reddy * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100]. 200240d7e203SCharan Teja Reddy */ 200340d7e203SCharan Teja Reddy static unsigned int fragmentation_score_zone(struct zone *zone) 200440d7e203SCharan Teja Reddy { 200540d7e203SCharan Teja Reddy return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); 200640d7e203SCharan Teja Reddy } 200740d7e203SCharan Teja Reddy 200840d7e203SCharan Teja Reddy /* 200940d7e203SCharan Teja Reddy * A weighted zone's fragmentation score is the external fragmentation 201040d7e203SCharan Teja Reddy * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It 201140d7e203SCharan Teja Reddy * returns a value in the range [0, 100]. 2012facdaa91SNitin Gupta * 2013facdaa91SNitin Gupta * The scaling factor ensures that proactive compaction focuses on larger 2014facdaa91SNitin Gupta * zones like ZONE_NORMAL, rather than smaller, specialized zones like 2015facdaa91SNitin Gupta * ZONE_DMA32. For smaller zones, the score value remains close to zero, 2016facdaa91SNitin Gupta * and thus never exceeds the high threshold for proactive compaction. 2017facdaa91SNitin Gupta */ 201840d7e203SCharan Teja Reddy static unsigned int fragmentation_score_zone_weighted(struct zone *zone) 2019facdaa91SNitin Gupta { 2020facdaa91SNitin Gupta unsigned long score; 2021facdaa91SNitin Gupta 202240d7e203SCharan Teja Reddy score = zone->present_pages * fragmentation_score_zone(zone); 2023facdaa91SNitin Gupta return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); 2024facdaa91SNitin Gupta } 2025facdaa91SNitin Gupta 2026facdaa91SNitin Gupta /* 2027facdaa91SNitin Gupta * The per-node proactive (background) compaction process is started by its 2028facdaa91SNitin Gupta * corresponding kcompactd thread when the node's fragmentation score 2029facdaa91SNitin Gupta * exceeds the high threshold. 
The compaction process remains active till 2030facdaa91SNitin Gupta * the node's score falls below the low threshold, or one of the back-off 2031facdaa91SNitin Gupta * conditions is met. 2032facdaa91SNitin Gupta */ 2033d34c0a75SNitin Gupta static unsigned int fragmentation_score_node(pg_data_t *pgdat) 2034facdaa91SNitin Gupta { 2035d34c0a75SNitin Gupta unsigned int score = 0; 2036facdaa91SNitin Gupta int zoneid; 2037facdaa91SNitin Gupta 2038facdaa91SNitin Gupta for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2039facdaa91SNitin Gupta struct zone *zone; 2040facdaa91SNitin Gupta 2041facdaa91SNitin Gupta zone = &pgdat->node_zones[zoneid]; 204240d7e203SCharan Teja Reddy score += fragmentation_score_zone_weighted(zone); 2043facdaa91SNitin Gupta } 2044facdaa91SNitin Gupta 2045facdaa91SNitin Gupta return score; 2046facdaa91SNitin Gupta } 2047facdaa91SNitin Gupta 2048d34c0a75SNitin Gupta static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) 2049facdaa91SNitin Gupta { 2050d34c0a75SNitin Gupta unsigned int wmark_low; 2051facdaa91SNitin Gupta 2052facdaa91SNitin Gupta /* 2053f0953a1bSIngo Molnar * Cap the low watermark to avoid excessive compaction 2054f0953a1bSIngo Molnar * activity in case a user sets the proactiveness tunable 2055facdaa91SNitin Gupta * close to 100 (maximum). 2056facdaa91SNitin Gupta */ 2057d34c0a75SNitin Gupta wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); 2058d34c0a75SNitin Gupta return low ? wmark_low : min(wmark_low + 10, 100U); 2059facdaa91SNitin Gupta } 2060facdaa91SNitin Gupta 2061facdaa91SNitin Gupta static bool should_proactive_compact_node(pg_data_t *pgdat) 2062facdaa91SNitin Gupta { 2063facdaa91SNitin Gupta int wmark_high; 2064facdaa91SNitin Gupta 2065facdaa91SNitin Gupta if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) 2066facdaa91SNitin Gupta return false; 2067facdaa91SNitin Gupta 2068facdaa91SNitin Gupta wmark_high = fragmentation_score_wmark(pgdat, false); 2069facdaa91SNitin Gupta return fragmentation_score_node(pgdat) > wmark_high; 2070facdaa91SNitin Gupta } 2071facdaa91SNitin Gupta 207240cacbcbSMel Gorman static enum compact_result __compact_finished(struct compact_control *cc) 2073748446bbSMel Gorman { 20748fb74b9fSMel Gorman unsigned int order; 2075d39773a0SVlastimil Babka const int migratetype = cc->migratetype; 2076cb2dcaf0SMel Gorman int ret; 2077748446bbSMel Gorman 2078753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 2079f2849aa0SVlastimil Babka if (compact_scanners_met(cc)) { 208055b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 208140cacbcbSMel Gorman reset_cached_positions(cc->zone); 208255b7c4c9SVlastimil Babka 208362997027SMel Gorman /* 208462997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 2085accf6242SVlastimil Babka * by kswapd when it goes to sleep. kcompactd does not set the 208662997027SMel Gorman * flag itself as the decision to be clear should be directly 208762997027SMel Gorman * based on an allocation request. 
208862997027SMel Gorman */ 2089accf6242SVlastimil Babka if (cc->direct_compaction) 209040cacbcbSMel Gorman cc->zone->compact_blockskip_flush = true; 209162997027SMel Gorman 2092c8f7de0bSMichal Hocko if (cc->whole_zone) 2093748446bbSMel Gorman return COMPACT_COMPLETE; 2094c8f7de0bSMichal Hocko else 2095c8f7de0bSMichal Hocko return COMPACT_PARTIAL_SKIPPED; 2096bb13ffebSMel Gorman } 2097748446bbSMel Gorman 2098facdaa91SNitin Gupta if (cc->proactive_compaction) { 2099facdaa91SNitin Gupta int score, wmark_low; 2100facdaa91SNitin Gupta pg_data_t *pgdat; 2101facdaa91SNitin Gupta 2102facdaa91SNitin Gupta pgdat = cc->zone->zone_pgdat; 2103facdaa91SNitin Gupta if (kswapd_is_running(pgdat)) 2104facdaa91SNitin Gupta return COMPACT_PARTIAL_SKIPPED; 2105facdaa91SNitin Gupta 2106facdaa91SNitin Gupta score = fragmentation_score_zone(cc->zone); 2107facdaa91SNitin Gupta wmark_low = fragmentation_score_wmark(pgdat, true); 2108facdaa91SNitin Gupta 2109facdaa91SNitin Gupta if (score > wmark_low) 2110facdaa91SNitin Gupta ret = COMPACT_CONTINUE; 2111facdaa91SNitin Gupta else 2112facdaa91SNitin Gupta ret = COMPACT_SUCCESS; 2113facdaa91SNitin Gupta 2114facdaa91SNitin Gupta goto out; 2115facdaa91SNitin Gupta } 2116facdaa91SNitin Gupta 211721c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 211856de7263SMel Gorman return COMPACT_CONTINUE; 211956de7263SMel Gorman 2120baf6a9a1SVlastimil Babka /* 2121efe771c7SMel Gorman * Always finish scanning a pageblock to reduce the possibility of 2122efe771c7SMel Gorman * fallbacks in the future. This is particularly important when 2123efe771c7SMel Gorman * migration source is unmovable/reclaimable but it's not worth 2124efe771c7SMel Gorman * special casing. 2125baf6a9a1SVlastimil Babka */ 2126*ee0913c4SKefeng Wang if (!pageblock_aligned(cc->migrate_pfn)) 2127baf6a9a1SVlastimil Babka return COMPACT_CONTINUE; 2128baf6a9a1SVlastimil Babka 212956de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 2130cb2dcaf0SMel Gorman ret = COMPACT_NO_SUITABLE_PAGE; 213156de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 213240cacbcbSMel Gorman struct free_area *area = &cc->zone->free_area[order]; 21332149cdaeSJoonsoo Kim bool can_steal; 21348fb74b9fSMel Gorman 213556de7263SMel Gorman /* Job done if page is free of the right migratetype */ 2136b03641afSDan Williams if (!free_area_empty(area, migratetype)) 2137cf378319SVlastimil Babka return COMPACT_SUCCESS; 213856de7263SMel Gorman 21392149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 21402149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 21412149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 2142b03641afSDan Williams !free_area_empty(area, MIGRATE_CMA)) 2143cf378319SVlastimil Babka return COMPACT_SUCCESS; 21442149cdaeSJoonsoo Kim #endif 21452149cdaeSJoonsoo Kim /* 21462149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 21472149cdaeSJoonsoo Kim * other migratetype buddy lists. 21482149cdaeSJoonsoo Kim */ 21492149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 2150fa599c44SMiaohe Lin true, &can_steal) != -1) 2151baf6a9a1SVlastimil Babka /* 2152fa599c44SMiaohe Lin * Movable pages are OK in any pageblock. 
If we are 2153fa599c44SMiaohe Lin * stealing for a non-movable allocation, make sure 2154fa599c44SMiaohe Lin * we finish compacting the current pageblock first 2155fa599c44SMiaohe Lin * (which is assured by the above migrate_pfn align 2156fa599c44SMiaohe Lin * check) so it is as free as possible and we won't 2157fa599c44SMiaohe Lin * have to steal another one soon. 2158baf6a9a1SVlastimil Babka */ 2159baf6a9a1SVlastimil Babka return COMPACT_SUCCESS; 2160baf6a9a1SVlastimil Babka } 2161baf6a9a1SVlastimil Babka 2162facdaa91SNitin Gupta out: 2163cb2dcaf0SMel Gorman if (cc->contended || fatal_signal_pending(current)) 2164cb2dcaf0SMel Gorman ret = COMPACT_CONTENDED; 2165cb2dcaf0SMel Gorman 2166cb2dcaf0SMel Gorman return ret; 2167837d026dSJoonsoo Kim } 2168837d026dSJoonsoo Kim 216940cacbcbSMel Gorman static enum compact_result compact_finished(struct compact_control *cc) 2170837d026dSJoonsoo Kim { 2171837d026dSJoonsoo Kim int ret; 2172837d026dSJoonsoo Kim 217340cacbcbSMel Gorman ret = __compact_finished(cc); 217440cacbcbSMel Gorman trace_mm_compaction_finished(cc->zone, cc->order, ret); 2175837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 2176837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 2177837d026dSJoonsoo Kim 2178837d026dSJoonsoo Kim return ret; 2179748446bbSMel Gorman } 2180748446bbSMel Gorman 2181ea7ab982SMichal Hocko static enum compact_result __compaction_suitable(struct zone *zone, int order, 2182c603844bSMel Gorman unsigned int alloc_flags, 218397a225e6SJoonsoo Kim int highest_zoneidx, 218486a294a8SMichal Hocko unsigned long wmark_target) 21853e7d3449SMel Gorman { 21863e7d3449SMel Gorman unsigned long watermark; 21873e7d3449SMel Gorman 218821c527a3SYaowei Bai if (is_via_compact_memory(order)) 21893957c776SMichal Hocko return COMPACT_CONTINUE; 21903957c776SMichal Hocko 2191a9214443SMel Gorman watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 2192ebff3980SVlastimil Babka /* 2193ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 2194ebff3980SVlastimil Babka * should be no need for compaction at all. 2195ebff3980SVlastimil Babka */ 219697a225e6SJoonsoo Kim if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, 2197ebff3980SVlastimil Babka alloc_flags)) 2198cf378319SVlastimil Babka return COMPACT_SUCCESS; 2199ebff3980SVlastimil Babka 22003957c776SMichal Hocko /* 22019861a62cSVlastimil Babka * Watermarks for order-0 must be met for compaction to be able to 2202984fdba6SVlastimil Babka * isolate free pages for migration targets. This means that the 2203984fdba6SVlastimil Babka * watermark and alloc_flags have to match, or be more pessimistic than 2204984fdba6SVlastimil Babka * the check in __isolate_free_page(). We don't use the direct 2205984fdba6SVlastimil Babka * compactor's alloc_flags, as they are not relevant for freepage 220697a225e6SJoonsoo Kim * isolation. We however do use the direct compactor's highest_zoneidx 220797a225e6SJoonsoo Kim * to skip over zones where lowmem reserves would prevent allocation 220897a225e6SJoonsoo Kim * even if compaction succeeds. 22098348faf9SVlastimil Babka * For costly orders, we require low watermark instead of min for 22108348faf9SVlastimil Babka * compaction to proceed to increase its chances. 2211d883c6cfSJoonsoo Kim * ALLOC_CMA is used, as pages in CMA pageblocks are considered 2212d883c6cfSJoonsoo Kim * suitable migration targets 22133e7d3449SMel Gorman */ 22148348faf9SVlastimil Babka watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
22158348faf9SVlastimil Babka low_wmark_pages(zone) : min_wmark_pages(zone); 22168348faf9SVlastimil Babka watermark += compact_gap(order); 221797a225e6SJoonsoo Kim if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, 2218d883c6cfSJoonsoo Kim ALLOC_CMA, wmark_target)) 22193e7d3449SMel Gorman return COMPACT_SKIPPED; 22203e7d3449SMel Gorman 2221cc5c9f09SVlastimil Babka return COMPACT_CONTINUE; 2222cc5c9f09SVlastimil Babka } 2223cc5c9f09SVlastimil Babka 22242b1a20c3SHui Su /* 22252b1a20c3SHui Su * compaction_suitable: Is this suitable to run compaction on this zone now? 22262b1a20c3SHui Su * Returns 22272b1a20c3SHui Su * COMPACT_SKIPPED - If there are too few free pages for compaction 22282b1a20c3SHui Su * COMPACT_SUCCESS - If the allocation would succeed without compaction 22292b1a20c3SHui Su * COMPACT_CONTINUE - If compaction should run now 22302b1a20c3SHui Su */ 2231cc5c9f09SVlastimil Babka enum compact_result compaction_suitable(struct zone *zone, int order, 2232cc5c9f09SVlastimil Babka unsigned int alloc_flags, 223397a225e6SJoonsoo Kim int highest_zoneidx) 2234cc5c9f09SVlastimil Babka { 2235cc5c9f09SVlastimil Babka enum compact_result ret; 2236cc5c9f09SVlastimil Babka int fragindex; 2237cc5c9f09SVlastimil Babka 223897a225e6SJoonsoo Kim ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, 2239cc5c9f09SVlastimil Babka zone_page_state(zone, NR_FREE_PAGES)); 22403e7d3449SMel Gorman /* 22413e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 22423e7d3449SMel Gorman * low memory or external fragmentation 22433e7d3449SMel Gorman * 2244ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 2245ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 22463e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 22473e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 22483e7d3449SMel Gorman * 224920311420SVlastimil Babka * Only compact if a failure would be due to fragmentation. Also 225020311420SVlastimil Babka * ignore fragindex for non-costly orders where the alternative to 225120311420SVlastimil Babka * a successful reclaim/compaction is OOM. Fragindex and the 225220311420SVlastimil Babka * vm.extfrag_threshold sysctl is meant as a heuristic to prevent 225320311420SVlastimil Babka * excessive compaction for costly orders, but it should not be at the 225420311420SVlastimil Babka * expense of system stability. 
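 *
 * With the default vm.extfrag_threshold of 500, a costly-order request
 * whose fragindex falls in 0..500 is treated as a plain low-memory
 * failure and compaction is skipped for the zone.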
22553e7d3449SMel Gorman */ 225620311420SVlastimil Babka if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { 22573e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 22583e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 2259cc5c9f09SVlastimil Babka ret = COMPACT_NOT_SUITABLE_ZONE; 22603e7d3449SMel Gorman } 22613e7d3449SMel Gorman 2262837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 2263837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 2264837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 2265837d026dSJoonsoo Kim 2266837d026dSJoonsoo Kim return ret; 2267837d026dSJoonsoo Kim } 2268837d026dSJoonsoo Kim 226986a294a8SMichal Hocko bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 227086a294a8SMichal Hocko int alloc_flags) 227186a294a8SMichal Hocko { 227286a294a8SMichal Hocko struct zone *zone; 227386a294a8SMichal Hocko struct zoneref *z; 227486a294a8SMichal Hocko 227586a294a8SMichal Hocko /* 227686a294a8SMichal Hocko * Make sure at least one zone would pass __compaction_suitable if we continue 227786a294a8SMichal Hocko * retrying the reclaim. 227886a294a8SMichal Hocko */ 227997a225e6SJoonsoo Kim for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 228097a225e6SJoonsoo Kim ac->highest_zoneidx, ac->nodemask) { 228186a294a8SMichal Hocko unsigned long available; 228286a294a8SMichal Hocko enum compact_result compact_result; 228386a294a8SMichal Hocko 228486a294a8SMichal Hocko /* 228586a294a8SMichal Hocko * Do not consider all the reclaimable memory because we do not 228686a294a8SMichal Hocko * want to thrash just for a single high order allocation which 228786a294a8SMichal Hocko * is not even guaranteed to appear even if __compaction_suitable 228886a294a8SMichal Hocko * is happy about the watermark check. 228986a294a8SMichal Hocko */ 22905a1c84b4SMel Gorman available = zone_reclaimable_pages(zone) / order; 229186a294a8SMichal Hocko available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 229286a294a8SMichal Hocko compact_result = __compaction_suitable(zone, order, alloc_flags, 229397a225e6SJoonsoo Kim ac->highest_zoneidx, available); 2294cff387d6SMiaohe Lin if (compact_result == COMPACT_CONTINUE) 229586a294a8SMichal Hocko return true; 229686a294a8SMichal Hocko } 229786a294a8SMichal Hocko 229886a294a8SMichal Hocko return false; 229986a294a8SMichal Hocko } 230086a294a8SMichal Hocko 23015e1f0f09SMel Gorman static enum compact_result 23025e1f0f09SMel Gorman compact_zone(struct compact_control *cc, struct capture_control *capc) 2303748446bbSMel Gorman { 2304ea7ab982SMichal Hocko enum compact_result ret; 230540cacbcbSMel Gorman unsigned long start_pfn = cc->zone->zone_start_pfn; 230640cacbcbSMel Gorman unsigned long end_pfn = zone_end_pfn(cc->zone); 2307566e54e1SMel Gorman unsigned long last_migrated_pfn; 2308e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 23098854c55fSMel Gorman bool update_cached; 231084b328aaSBaolin Wang unsigned int nr_succeeded = 0; 2311748446bbSMel Gorman 2312a94b5252SYafang Shao /* 2313a94b5252SYafang Shao * These counters track activities during zone compaction. Initialize 2314a94b5252SYafang Shao * them before compacting a new zone.
2315a94b5252SYafang Shao */ 2316a94b5252SYafang Shao cc->total_migrate_scanned = 0; 2317a94b5252SYafang Shao cc->total_free_scanned = 0; 2318a94b5252SYafang Shao cc->nr_migratepages = 0; 2319a94b5252SYafang Shao cc->nr_freepages = 0; 2320a94b5252SYafang Shao INIT_LIST_HEAD(&cc->freepages); 2321a94b5252SYafang Shao INIT_LIST_HEAD(&cc->migratepages); 2322a94b5252SYafang Shao 232301c0bfe0SWei Yang cc->migratetype = gfp_migratetype(cc->gfp_mask); 232440cacbcbSMel Gorman ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, 232597a225e6SJoonsoo Kim cc->highest_zoneidx); 23263e7d3449SMel Gorman /* Compaction is likely to fail */ 2327cf378319SVlastimil Babka if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) 23283e7d3449SMel Gorman return ret; 2329c46649deSMichal Hocko 2330c46649deSMichal Hocko /* huh, compaction_suitable is returning something unexpected */ 2331c46649deSMichal Hocko VM_BUG_ON(ret != COMPACT_CONTINUE); 23323e7d3449SMel Gorman 2333c89511abSMel Gorman /* 2334d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 2335accf6242SVlastimil Babka * is about to be retried after being deferred. 2336d3132e4bSVlastimil Babka */ 233740cacbcbSMel Gorman if (compaction_restarting(cc->zone, cc->order)) 233840cacbcbSMel Gorman __reset_isolation_suitable(cc->zone); 2339d3132e4bSVlastimil Babka 2340d3132e4bSVlastimil Babka /* 2341c89511abSMel Gorman * Set up to move all movable pages to the end of the zone. Use cached 234206ed2998SVlastimil Babka * information on where the scanners should start (unless we explicitly 234306ed2998SVlastimil Babka * want to compact the whole zone), but check that it is initialised 234406ed2998SVlastimil Babka * by ensuring the values are within zone boundaries. 2345c89511abSMel Gorman */ 234670b44595SMel Gorman cc->fast_start_pfn = 0; 234706ed2998SVlastimil Babka if (cc->whole_zone) { 234806ed2998SVlastimil Babka cc->migrate_pfn = start_pfn; 234906ed2998SVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 235006ed2998SVlastimil Babka } else { 235140cacbcbSMel Gorman cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; 235240cacbcbSMel Gorman cc->free_pfn = cc->zone->compact_cached_free_pfn; 2353623446e4SJoonsoo Kim if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 235406b6640aSVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 235540cacbcbSMel Gorman cc->zone->compact_cached_free_pfn = cc->free_pfn; 2356c89511abSMel Gorman } 2357623446e4SJoonsoo Kim if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 2358c89511abSMel Gorman cc->migrate_pfn = start_pfn; 235940cacbcbSMel Gorman cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 236040cacbcbSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 2361c89511abSMel Gorman } 2362c8f7de0bSMichal Hocko 2363e332f741SMel Gorman if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) 2364c8f7de0bSMichal Hocko cc->whole_zone = true; 236506ed2998SVlastimil Babka } 2366c8f7de0bSMichal Hocko 2367566e54e1SMel Gorman last_migrated_pfn = 0; 2368748446bbSMel Gorman 23698854c55fSMel Gorman /* 23708854c55fSMel Gorman * Migrate has separate cached PFNs for ASYNC and SYNC* migration on 23718854c55fSMel Gorman * the basis that some migrations will fail in ASYNC mode. However, 23728854c55fSMel Gorman * if the cached PFNs match and pageblocks are skipped due to having 23738854c55fSMel Gorman * no isolation candidates, then the sync state does not matter.
23748854c55fSMel Gorman * Until a pageblock with isolation candidates is found, keep the 23758854c55fSMel Gorman * cached PFNs in sync to avoid revisiting the same blocks. 23768854c55fSMel Gorman */ 23778854c55fSMel Gorman update_cached = !sync && 23788854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; 23798854c55fSMel Gorman 2380abd4349fSBaolin Wang trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync); 23810eb927c0SMel Gorman 2382361a2a22SMinchan Kim /* lru_add_drain_all could be expensive with involving other CPUs */ 2383361a2a22SMinchan Kim lru_add_drain(); 2384748446bbSMel Gorman 238540cacbcbSMel Gorman while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { 23869d502c1cSMinchan Kim int err; 238719d3cf9dSYanfei Xu unsigned long iteration_start_pfn = cc->migrate_pfn; 2388748446bbSMel Gorman 2389804d3121SMel Gorman /* 2390804d3121SMel Gorman * Avoid multiple rescans which can happen if a page cannot be 2391804d3121SMel Gorman * isolated (dirty/writeback in async mode) or if the migrated 2392804d3121SMel Gorman * pages are being allocated before the pageblock is cleared. 2393804d3121SMel Gorman * The first rescan will capture the entire pageblock for 2394804d3121SMel Gorman * migration. If it fails, it'll be marked skip and scanning 2395804d3121SMel Gorman * will proceed as normal. 2396804d3121SMel Gorman */ 2397804d3121SMel Gorman cc->rescan = false; 2398804d3121SMel Gorman if (pageblock_start_pfn(last_migrated_pfn) == 239919d3cf9dSYanfei Xu pageblock_start_pfn(iteration_start_pfn)) { 2400804d3121SMel Gorman cc->rescan = true; 2401804d3121SMel Gorman } 2402804d3121SMel Gorman 240332aaf055SPengfei Li switch (isolate_migratepages(cc)) { 2404f9e35b3bSMel Gorman case ISOLATE_ABORT: 24052d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 24065733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 2407e64c5237SShaohua Li cc->nr_migratepages = 0; 2408f9e35b3bSMel Gorman goto out; 2409f9e35b3bSMel Gorman case ISOLATE_NONE: 24108854c55fSMel Gorman if (update_cached) { 24118854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = 24128854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0]; 24138854c55fSMel Gorman } 24148854c55fSMel Gorman 2415fdaf7f5cSVlastimil Babka /* 2416fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 2417fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 2418fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 
2419fdaf7f5cSVlastimil Babka */ 2420fdaf7f5cSVlastimil Babka goto check_drain; 2421f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 24228854c55fSMel Gorman update_cached = false; 242319d3cf9dSYanfei Xu last_migrated_pfn = iteration_start_pfn; 2424f9e35b3bSMel Gorman } 2425748446bbSMel Gorman 2426d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 2427e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 242884b328aaSBaolin Wang MR_COMPACTION, &nr_succeeded); 2429748446bbSMel Gorman 2430abd4349fSBaolin Wang trace_mm_compaction_migratepages(cc, nr_succeeded); 2431748446bbSMel Gorman 2432f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 2433f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 24349d502c1cSMinchan Kim if (err) { 24355733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 24367ed695e0SVlastimil Babka /* 24377ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 24387ed695e0SVlastimil Babka * and we want compact_finished() to detect it 24397ed695e0SVlastimil Babka */ 2440f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 24412d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 24424bf2bba3SDavid Rientjes goto out; 2443748446bbSMel Gorman } 2444fdd048e1SVlastimil Babka /* 2445fdd048e1SVlastimil Babka * We failed to migrate at least one page in the current 2446fdd048e1SVlastimil Babka * order-aligned block, so skip the rest of it. 2447fdd048e1SVlastimil Babka */ 2448fdd048e1SVlastimil Babka if (cc->direct_compaction && 2449fdd048e1SVlastimil Babka (cc->mode == MIGRATE_ASYNC)) { 2450fdd048e1SVlastimil Babka cc->migrate_pfn = block_end_pfn( 2451fdd048e1SVlastimil Babka cc->migrate_pfn - 1, cc->order); 2452fdd048e1SVlastimil Babka /* Draining pcplists is useless in this case */ 2453566e54e1SMel Gorman last_migrated_pfn = 0; 2454fdd048e1SVlastimil Babka } 24554bf2bba3SDavid Rientjes } 2456fdaf7f5cSVlastimil Babka 2457fdaf7f5cSVlastimil Babka check_drain: 2458fdaf7f5cSVlastimil Babka /* 2459fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 2460fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? If yes, 2461fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 2462fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 2463fdaf7f5cSVlastimil Babka * would succeed. 
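 * The drain below (lru_add_drain_cpu_zone()) only touches the local CPU for
 * this zone, which keeps the per-iteration cost low compared to a global drain.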
2464fdaf7f5cSVlastimil Babka */ 2465566e54e1SMel Gorman if (cc->order > 0 && last_migrated_pfn) { 2466fdaf7f5cSVlastimil Babka unsigned long current_block_start = 246706b6640aSVlastimil Babka block_start_pfn(cc->migrate_pfn, cc->order); 2468fdaf7f5cSVlastimil Babka 2469566e54e1SMel Gorman if (last_migrated_pfn < current_block_start) { 2470b01b2141SIngo Molnar lru_add_drain_cpu_zone(cc->zone); 2471fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 2472566e54e1SMel Gorman last_migrated_pfn = 0; 2473fdaf7f5cSVlastimil Babka } 2474fdaf7f5cSVlastimil Babka } 2475fdaf7f5cSVlastimil Babka 24765e1f0f09SMel Gorman /* Stop if a page has been captured */ 24775e1f0f09SMel Gorman if (capc && capc->page) { 24785e1f0f09SMel Gorman ret = COMPACT_SUCCESS; 24795e1f0f09SMel Gorman break; 24805e1f0f09SMel Gorman } 2481748446bbSMel Gorman } 2482748446bbSMel Gorman 2483f9e35b3bSMel Gorman out: 24846bace090SVlastimil Babka /* 24856bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 24866bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 24876bace090SVlastimil Babka */ 24886bace090SVlastimil Babka if (cc->nr_freepages > 0) { 24896bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 24906bace090SVlastimil Babka 24916bace090SVlastimil Babka cc->nr_freepages = 0; 24926bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 24936bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 249406b6640aSVlastimil Babka free_pfn = pageblock_start_pfn(free_pfn); 24956bace090SVlastimil Babka /* 24966bace090SVlastimil Babka * Only go back, not forward. The cached pfn might have been 24976bace090SVlastimil Babka * already reset to zone end in compact_finished() 24986bace090SVlastimil Babka */ 249940cacbcbSMel Gorman if (free_pfn > cc->zone->compact_cached_free_pfn) 250040cacbcbSMel Gorman cc->zone->compact_cached_free_pfn = free_pfn; 25016bace090SVlastimil Babka } 2502748446bbSMel Gorman 25037f354a54SDavid Rientjes count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); 25047f354a54SDavid Rientjes count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); 25057f354a54SDavid Rientjes 2506abd4349fSBaolin Wang trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); 25070eb927c0SMel Gorman 2508748446bbSMel Gorman return ret; 2509748446bbSMel Gorman } 251076ab0f53SMel Gorman 2511ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order, 2512c3486f53SVlastimil Babka gfp_t gfp_mask, enum compact_priority prio, 251397a225e6SJoonsoo Kim unsigned int alloc_flags, int highest_zoneidx, 25145e1f0f09SMel Gorman struct page **capture) 251556de7263SMel Gorman { 2516ea7ab982SMichal Hocko enum compact_result ret; 251756de7263SMel Gorman struct compact_control cc = { 251856de7263SMel Gorman .order = order, 2519dbe2d4e4SMel Gorman .search_order = order, 25206d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 252156de7263SMel Gorman .zone = zone, 2522a5508cd8SVlastimil Babka .mode = (prio == COMPACT_PRIO_ASYNC) ? 
2523a5508cd8SVlastimil Babka MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 2524ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 252597a225e6SJoonsoo Kim .highest_zoneidx = highest_zoneidx, 2526accf6242SVlastimil Babka .direct_compaction = true, 2527a8e025e5SVlastimil Babka .whole_zone = (prio == MIN_COMPACT_PRIORITY), 25289f7e3387SVlastimil Babka .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), 25299f7e3387SVlastimil Babka .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) 253056de7263SMel Gorman }; 25315e1f0f09SMel Gorman struct capture_control capc = { 25325e1f0f09SMel Gorman .cc = &cc, 25335e1f0f09SMel Gorman .page = NULL, 25345e1f0f09SMel Gorman }; 25355e1f0f09SMel Gorman 2536b9e20f0dSVlastimil Babka /* 2537b9e20f0dSVlastimil Babka * Make sure the structs are really initialized before we expose the 2538b9e20f0dSVlastimil Babka * capture control, in case we are interrupted and the interrupt handler 2539b9e20f0dSVlastimil Babka * frees a page. 2540b9e20f0dSVlastimil Babka */ 2541b9e20f0dSVlastimil Babka barrier(); 2542b9e20f0dSVlastimil Babka WRITE_ONCE(current->capture_control, &capc); 254356de7263SMel Gorman 25445e1f0f09SMel Gorman ret = compact_zone(&cc, &capc); 2545e64c5237SShaohua Li 2546e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 2547e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 2548e64c5237SShaohua Li 2549b9e20f0dSVlastimil Babka /* 2550b9e20f0dSVlastimil Babka * Make sure we hide capture control first before we read the captured 2551b9e20f0dSVlastimil Babka * page pointer, otherwise an interrupt could free and capture a page 2552b9e20f0dSVlastimil Babka * and we would leak it. 2553b9e20f0dSVlastimil Babka */ 2554b9e20f0dSVlastimil Babka WRITE_ONCE(current->capture_control, NULL); 2555b9e20f0dSVlastimil Babka *capture = READ_ONCE(capc.page); 255606dac2f4SCharan Teja Reddy /* 255706dac2f4SCharan Teja Reddy * Technically, it is also possible that compaction is skipped but 255806dac2f4SCharan Teja Reddy * the page is still captured out of luck(IRQ came and freed the page). 255906dac2f4SCharan Teja Reddy * Returning COMPACT_SUCCESS in such cases helps in properly accounting 256006dac2f4SCharan Teja Reddy * the COMPACT[STALL|FAIL] when compaction is skipped. 256106dac2f4SCharan Teja Reddy */ 256206dac2f4SCharan Teja Reddy if (*capture) 256306dac2f4SCharan Teja Reddy ret = COMPACT_SUCCESS; 25645e1f0f09SMel Gorman 2565e64c5237SShaohua Li return ret; 256656de7263SMel Gorman } 256756de7263SMel Gorman 25685e771905SMel Gorman int sysctl_extfrag_threshold = 500; 25695e771905SMel Gorman 257056de7263SMel Gorman /** 257156de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 257256de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 25731a6d53a1SVlastimil Babka * @order: The order of the current allocation 25741a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 25751a6d53a1SVlastimil Babka * @ac: The context of current allocation 2576112d2d29SYang Shi * @prio: Determines how hard direct compaction should try to succeed 25776467552cSVlastimil Babka * @capture: Pointer to free page created by compaction will be stored here 257856de7263SMel Gorman * 257956de7263SMel Gorman * This is the main entry point for direct page compaction. 
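 * Return: the maximum compact_result over all zones tried, starting from
 * COMPACT_SKIPPED; COMPACT_SKIPPED is also returned when the gfp mask does not
 * allow __GFP_IO and compaction cannot be attempted at all.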
258056de7263SMel Gorman */ 2581ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 2582c603844bSMel Gorman unsigned int alloc_flags, const struct alloc_context *ac, 25835e1f0f09SMel Gorman enum compact_priority prio, struct page **capture) 258456de7263SMel Gorman { 2585fe573327SVasily Averin int may_perform_io = (__force int)(gfp_mask & __GFP_IO); 258656de7263SMel Gorman struct zoneref *z; 258756de7263SMel Gorman struct zone *zone; 25881d4746d3SMichal Hocko enum compact_result rc = COMPACT_SKIPPED; 258956de7263SMel Gorman 259073e64c51SMichal Hocko /* 259173e64c51SMichal Hocko * Check if the GFP flags allow compaction - GFP_NOIO is really 259273e64c51SMichal Hocko * tricky context because the migration might require IO 259373e64c51SMichal Hocko */ 259473e64c51SMichal Hocko if (!may_perform_io) 259553853e2dSVlastimil Babka return COMPACT_SKIPPED; 259656de7263SMel Gorman 2597a5508cd8SVlastimil Babka trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2598837d026dSJoonsoo Kim 259956de7263SMel Gorman /* Compact each zone in the list */ 260097a225e6SJoonsoo Kim for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 260197a225e6SJoonsoo Kim ac->highest_zoneidx, ac->nodemask) { 2602ea7ab982SMichal Hocko enum compact_result status; 260356de7263SMel Gorman 2604a8e025e5SVlastimil Babka if (prio > MIN_COMPACT_PRIORITY 2605a8e025e5SVlastimil Babka && compaction_deferred(zone, order)) { 26061d4746d3SMichal Hocko rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 260753853e2dSVlastimil Babka continue; 26081d4746d3SMichal Hocko } 260953853e2dSVlastimil Babka 2610a5508cd8SVlastimil Babka status = compact_zone_order(zone, order, gfp_mask, prio, 261197a225e6SJoonsoo Kim alloc_flags, ac->highest_zoneidx, capture); 261256de7263SMel Gorman rc = max(status, rc); 261356de7263SMel Gorman 26147ceb009aSVlastimil Babka /* The allocation should succeed, stop compacting */ 26157ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 261653853e2dSVlastimil Babka /* 261753853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 261853853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 261953853e2dSVlastimil Babka * will repeat this with true if allocation indeed 262053853e2dSVlastimil Babka * succeeds in this zone. 262153853e2dSVlastimil Babka */ 262253853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 26231f9efdefSVlastimil Babka 2624c3486f53SVlastimil Babka break; 26251f9efdefSVlastimil Babka } 26261f9efdefSVlastimil Babka 2627a5508cd8SVlastimil Babka if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || 2628c3486f53SVlastimil Babka status == COMPACT_PARTIAL_SKIPPED)) 262953853e2dSVlastimil Babka /* 263053853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 263153853e2dSVlastimil Babka * so we defer compaction there. If it ends up 263253853e2dSVlastimil Babka * succeeding after all, it will be reset. 263353853e2dSVlastimil Babka */ 263453853e2dSVlastimil Babka defer_compaction(zone, order); 26351f9efdefSVlastimil Babka 26361f9efdefSVlastimil Babka /* 26371f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 26381f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. 
In that 2639c3486f53SVlastimil Babka * case do not try further zones 26401f9efdefSVlastimil Babka */ 2641c3486f53SVlastimil Babka if ((prio == COMPACT_PRIO_ASYNC && need_resched()) 2642c3486f53SVlastimil Babka || fatal_signal_pending(current)) 26431f9efdefSVlastimil Babka break; 26441f9efdefSVlastimil Babka } 26451f9efdefSVlastimil Babka 264656de7263SMel Gorman return rc; 264756de7263SMel Gorman } 264856de7263SMel Gorman 2649facdaa91SNitin Gupta /* 2650facdaa91SNitin Gupta * Compact all zones within a node till each zone's fragmentation score 2651facdaa91SNitin Gupta * reaches within proactive compaction thresholds (as determined by the 2652facdaa91SNitin Gupta * proactiveness tunable). 2653facdaa91SNitin Gupta * 2654facdaa91SNitin Gupta * It is possible that the function returns before reaching score targets 2655facdaa91SNitin Gupta * due to various back-off conditions, such as, contention on per-node or 2656facdaa91SNitin Gupta * per-zone locks. 2657facdaa91SNitin Gupta */ 2658facdaa91SNitin Gupta static void proactive_compact_node(pg_data_t *pgdat) 2659facdaa91SNitin Gupta { 2660facdaa91SNitin Gupta int zoneid; 2661facdaa91SNitin Gupta struct zone *zone; 2662facdaa91SNitin Gupta struct compact_control cc = { 2663facdaa91SNitin Gupta .order = -1, 2664facdaa91SNitin Gupta .mode = MIGRATE_SYNC_LIGHT, 2665facdaa91SNitin Gupta .ignore_skip_hint = true, 2666facdaa91SNitin Gupta .whole_zone = true, 2667facdaa91SNitin Gupta .gfp_mask = GFP_KERNEL, 2668facdaa91SNitin Gupta .proactive_compaction = true, 2669facdaa91SNitin Gupta }; 2670facdaa91SNitin Gupta 2671facdaa91SNitin Gupta for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2672facdaa91SNitin Gupta zone = &pgdat->node_zones[zoneid]; 2673facdaa91SNitin Gupta if (!populated_zone(zone)) 2674facdaa91SNitin Gupta continue; 2675facdaa91SNitin Gupta 2676facdaa91SNitin Gupta cc.zone = zone; 2677facdaa91SNitin Gupta 2678facdaa91SNitin Gupta compact_zone(&cc, NULL); 2679facdaa91SNitin Gupta 2680facdaa91SNitin Gupta VM_BUG_ON(!list_empty(&cc.freepages)); 2681facdaa91SNitin Gupta VM_BUG_ON(!list_empty(&cc.migratepages)); 2682facdaa91SNitin Gupta } 2683facdaa91SNitin Gupta } 268456de7263SMel Gorman 268576ab0f53SMel Gorman /* Compact all zones within a node */ 26867103f16dSAndrew Morton static void compact_node(int nid) 26877be62de9SRik van Riel { 2688791cae96SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2689791cae96SVlastimil Babka int zoneid; 2690791cae96SVlastimil Babka struct zone *zone; 26917be62de9SRik van Riel struct compact_control cc = { 26927be62de9SRik van Riel .order = -1, 2693e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 269491ca9186SDavid Rientjes .ignore_skip_hint = true, 269506ed2998SVlastimil Babka .whole_zone = true, 269673e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 26977be62de9SRik van Riel }; 26987be62de9SRik van Riel 2699791cae96SVlastimil Babka 2700791cae96SVlastimil Babka for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2701791cae96SVlastimil Babka 2702791cae96SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2703791cae96SVlastimil Babka if (!populated_zone(zone)) 2704791cae96SVlastimil Babka continue; 2705791cae96SVlastimil Babka 2706791cae96SVlastimil Babka cc.zone = zone; 2707791cae96SVlastimil Babka 27085e1f0f09SMel Gorman compact_zone(&cc, NULL); 2709791cae96SVlastimil Babka 2710791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 2711791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 2712791cae96SVlastimil Babka } 27137be62de9SRik van Riel } 27147be62de9SRik van Riel 271576ab0f53SMel 
Gorman /* Compact all nodes in the system */ 27167964c06dSJason Liu static void compact_nodes(void) 271776ab0f53SMel Gorman { 271876ab0f53SMel Gorman int nid; 271976ab0f53SMel Gorman 27208575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 27218575ec29SHugh Dickins lru_add_drain_all(); 27228575ec29SHugh Dickins 272376ab0f53SMel Gorman for_each_online_node(nid) 272476ab0f53SMel Gorman compact_node(nid); 272576ab0f53SMel Gorman } 272676ab0f53SMel Gorman 2727fec4eb2cSYaowei Bai /* 2728facdaa91SNitin Gupta * Tunable for proactive compaction. It determines how 2729facdaa91SNitin Gupta * aggressively the kernel should compact memory in the 2730facdaa91SNitin Gupta * background. It takes values in the range [0, 100]. 2731facdaa91SNitin Gupta */ 2732d34c0a75SNitin Gupta unsigned int __read_mostly sysctl_compaction_proactiveness = 20; 2733facdaa91SNitin Gupta 273465d759c8SCharan Teja Reddy int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, 273565d759c8SCharan Teja Reddy void *buffer, size_t *length, loff_t *ppos) 273665d759c8SCharan Teja Reddy { 273765d759c8SCharan Teja Reddy int rc, nid; 273865d759c8SCharan Teja Reddy 273965d759c8SCharan Teja Reddy rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 274065d759c8SCharan Teja Reddy if (rc) 274165d759c8SCharan Teja Reddy return rc; 274265d759c8SCharan Teja Reddy 274365d759c8SCharan Teja Reddy if (write && sysctl_compaction_proactiveness) { 274465d759c8SCharan Teja Reddy for_each_online_node(nid) { 274565d759c8SCharan Teja Reddy pg_data_t *pgdat = NODE_DATA(nid); 274665d759c8SCharan Teja Reddy 274765d759c8SCharan Teja Reddy if (pgdat->proactive_compact_trigger) 274865d759c8SCharan Teja Reddy continue; 274965d759c8SCharan Teja Reddy 275065d759c8SCharan Teja Reddy pgdat->proactive_compact_trigger = true; 275165d759c8SCharan Teja Reddy wake_up_interruptible(&pgdat->kcompactd_wait); 275265d759c8SCharan Teja Reddy } 275365d759c8SCharan Teja Reddy } 275465d759c8SCharan Teja Reddy 275565d759c8SCharan Teja Reddy return 0; 275665d759c8SCharan Teja Reddy } 275765d759c8SCharan Teja Reddy 2758facdaa91SNitin Gupta /* 2759fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via 2760fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory 2761fec4eb2cSYaowei Bai */ 276276ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 276332927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos) 276476ab0f53SMel Gorman { 276576ab0f53SMel Gorman if (write) 27667964c06dSJason Liu compact_nodes(); 276776ab0f53SMel Gorman 276876ab0f53SMel Gorman return 0; 276976ab0f53SMel Gorman } 2770ed4a6d7fSMel Gorman 2771ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 277217adb230SYueHaibing static ssize_t compact_store(struct device *dev, 277310fbcf4cSKay Sievers struct device_attribute *attr, 2774ed4a6d7fSMel Gorman const char *buf, size_t count) 2775ed4a6d7fSMel Gorman { 27768575ec29SHugh Dickins int nid = dev->id; 27778575ec29SHugh Dickins 27788575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 27798575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 27808575ec29SHugh Dickins lru_add_drain_all(); 27818575ec29SHugh Dickins 27828575ec29SHugh Dickins compact_node(nid); 27838575ec29SHugh Dickins } 2784ed4a6d7fSMel Gorman 2785ed4a6d7fSMel Gorman return count; 2786ed4a6d7fSMel Gorman } 278717adb230SYueHaibing static DEVICE_ATTR_WO(compact); 2788ed4a6d7fSMel Gorman 2789ed4a6d7fSMel Gorman int compaction_register_node(struct node 
*node) 2790ed4a6d7fSMel Gorman { 279110fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 2792ed4a6d7fSMel Gorman } 2793ed4a6d7fSMel Gorman 2794ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 2795ed4a6d7fSMel Gorman { 279610fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 2797ed4a6d7fSMel Gorman } 2798ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2799ff9543fdSMichal Nazarewicz 2800698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat) 2801698b1b30SVlastimil Babka { 280265d759c8SCharan Teja Reddy return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || 280365d759c8SCharan Teja Reddy pgdat->proactive_compact_trigger; 2804698b1b30SVlastimil Babka } 2805698b1b30SVlastimil Babka 2806698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat) 2807698b1b30SVlastimil Babka { 2808698b1b30SVlastimil Babka int zoneid; 2809698b1b30SVlastimil Babka struct zone *zone; 281097a225e6SJoonsoo Kim enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; 2811698b1b30SVlastimil Babka 281297a225e6SJoonsoo Kim for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { 2813698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2814698b1b30SVlastimil Babka 2815698b1b30SVlastimil Babka if (!populated_zone(zone)) 2816698b1b30SVlastimil Babka continue; 2817698b1b30SVlastimil Babka 2818698b1b30SVlastimil Babka if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 281997a225e6SJoonsoo Kim highest_zoneidx) == COMPACT_CONTINUE) 2820698b1b30SVlastimil Babka return true; 2821698b1b30SVlastimil Babka } 2822698b1b30SVlastimil Babka 2823698b1b30SVlastimil Babka return false; 2824698b1b30SVlastimil Babka } 2825698b1b30SVlastimil Babka 2826698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat) 2827698b1b30SVlastimil Babka { 2828698b1b30SVlastimil Babka /* 2829698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested 2830698b1b30SVlastimil Babka * order is allocatable. 
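 * The requested order and highest_zoneidx are the ones recorded on pgdat by
 * wakeup_kcompactd().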
2831698b1b30SVlastimil Babka */ 2832698b1b30SVlastimil Babka int zoneid; 2833698b1b30SVlastimil Babka struct zone *zone; 2834698b1b30SVlastimil Babka struct compact_control cc = { 2835698b1b30SVlastimil Babka .order = pgdat->kcompactd_max_order, 2836dbe2d4e4SMel Gorman .search_order = pgdat->kcompactd_max_order, 283797a225e6SJoonsoo Kim .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, 2838698b1b30SVlastimil Babka .mode = MIGRATE_SYNC_LIGHT, 2839a0647dc9SDavid Rientjes .ignore_skip_hint = false, 284073e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 2841698b1b30SVlastimil Babka }; 2842698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 284397a225e6SJoonsoo Kim cc.highest_zoneidx); 28447f354a54SDavid Rientjes count_compact_event(KCOMPACTD_WAKE); 2845698b1b30SVlastimil Babka 284697a225e6SJoonsoo Kim for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { 2847698b1b30SVlastimil Babka int status; 2848698b1b30SVlastimil Babka 2849698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2850698b1b30SVlastimil Babka if (!populated_zone(zone)) 2851698b1b30SVlastimil Babka continue; 2852698b1b30SVlastimil Babka 2853698b1b30SVlastimil Babka if (compaction_deferred(zone, cc.order)) 2854698b1b30SVlastimil Babka continue; 2855698b1b30SVlastimil Babka 2856698b1b30SVlastimil Babka if (compaction_suitable(zone, cc.order, 0, zoneid) != 2857698b1b30SVlastimil Babka COMPACT_CONTINUE) 2858698b1b30SVlastimil Babka continue; 2859698b1b30SVlastimil Babka 2860172400c6SVlastimil Babka if (kthread_should_stop()) 2861172400c6SVlastimil Babka return; 2862a94b5252SYafang Shao 2863a94b5252SYafang Shao cc.zone = zone; 28645e1f0f09SMel Gorman status = compact_zone(&cc, NULL); 2865698b1b30SVlastimil Babka 28667ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 2867698b1b30SVlastimil Babka compaction_defer_reset(zone, cc.order, false); 2868c8f7de0bSMichal Hocko } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2869698b1b30SVlastimil Babka /* 2870bc3106b2SDavid Rientjes * Buddy pages may become stranded on pcps that could 2871bc3106b2SDavid Rientjes * otherwise coalesce on the zone's free area for 2872bc3106b2SDavid Rientjes * order >= cc.order. This is ratelimited by the 2873bc3106b2SDavid Rientjes * upcoming deferral. 2874bc3106b2SDavid Rientjes */ 2875bc3106b2SDavid Rientjes drain_all_pages(zone); 2876bc3106b2SDavid Rientjes 2877bc3106b2SDavid Rientjes /* 2878698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like 2879698b1b30SVlastimil Babka * sync direct compaction does. 2880698b1b30SVlastimil Babka */ 2881698b1b30SVlastimil Babka defer_compaction(zone, cc.order); 2882698b1b30SVlastimil Babka } 2883698b1b30SVlastimil Babka 28847f354a54SDavid Rientjes count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 28857f354a54SDavid Rientjes cc.total_migrate_scanned); 28867f354a54SDavid Rientjes count_compact_events(KCOMPACTD_FREE_SCANNED, 28877f354a54SDavid Rientjes cc.total_free_scanned); 28887f354a54SDavid Rientjes 2889698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 2890698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 2891698b1b30SVlastimil Babka } 2892698b1b30SVlastimil Babka 2893698b1b30SVlastimil Babka /* 2894698b1b30SVlastimil Babka * Regardless of success, we are done until woken up next. 
But remember 289597a225e6SJoonsoo Kim * the requested order/highest_zoneidx in case it was higher/tighter 289697a225e6SJoonsoo Kim * than our current ones 2897698b1b30SVlastimil Babka */ 2898698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order) 2899698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 290097a225e6SJoonsoo Kim if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) 290197a225e6SJoonsoo Kim pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2902698b1b30SVlastimil Babka } 2903698b1b30SVlastimil Babka 290497a225e6SJoonsoo Kim void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) 2905698b1b30SVlastimil Babka { 2906698b1b30SVlastimil Babka if (!order) 2907698b1b30SVlastimil Babka return; 2908698b1b30SVlastimil Babka 2909698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order) 2910698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order; 2911698b1b30SVlastimil Babka 291297a225e6SJoonsoo Kim if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) 291397a225e6SJoonsoo Kim pgdat->kcompactd_highest_zoneidx = highest_zoneidx; 2914698b1b30SVlastimil Babka 29156818600fSDavidlohr Bueso /* 29166818600fSDavidlohr Bueso * Pairs with implicit barrier in wait_event_freezable() 29176818600fSDavidlohr Bueso * such that wakeups are not missed. 29186818600fSDavidlohr Bueso */ 29196818600fSDavidlohr Bueso if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 2920698b1b30SVlastimil Babka return; 2921698b1b30SVlastimil Babka 2922698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat)) 2923698b1b30SVlastimil Babka return; 2924698b1b30SVlastimil Babka 2925698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 292697a225e6SJoonsoo Kim highest_zoneidx); 2927698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait); 2928698b1b30SVlastimil Babka } 2929698b1b30SVlastimil Babka 2930698b1b30SVlastimil Babka /* 2931698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread 2932698b1b30SVlastimil Babka * from the init process. 2933698b1b30SVlastimil Babka */ 2934698b1b30SVlastimil Babka static int kcompactd(void *p) 2935698b1b30SVlastimil Babka { 2936698b1b30SVlastimil Babka pg_data_t *pgdat = (pg_data_t *)p; 2937698b1b30SVlastimil Babka struct task_struct *tsk = current; 2938e1e92bfaSCharan Teja Reddy long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); 2939e1e92bfaSCharan Teja Reddy long timeout = default_timeout; 2940698b1b30SVlastimil Babka 2941698b1b30SVlastimil Babka const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2942698b1b30SVlastimil Babka 2943698b1b30SVlastimil Babka if (!cpumask_empty(cpumask)) 2944698b1b30SVlastimil Babka set_cpus_allowed_ptr(tsk, cpumask); 2945698b1b30SVlastimil Babka 2946698b1b30SVlastimil Babka set_freezable(); 2947698b1b30SVlastimil Babka 2948698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 294997a225e6SJoonsoo Kim pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2950698b1b30SVlastimil Babka 2951698b1b30SVlastimil Babka while (!kthread_should_stop()) { 2952eb414681SJohannes Weiner unsigned long pflags; 2953eb414681SJohannes Weiner 295465d759c8SCharan Teja Reddy /* 295565d759c8SCharan Teja Reddy * Avoid the unnecessary wakeup for proactive compaction 295665d759c8SCharan Teja Reddy * when it is disabled. 
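 * A MAX_SCHEDULE_TIMEOUT wait means kcompactd then only wakes for an explicit
 * wakeup_kcompactd() request or a later write to the proactiveness sysctl.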
295765d759c8SCharan Teja Reddy */ 295865d759c8SCharan Teja Reddy if (!sysctl_compaction_proactiveness) 295965d759c8SCharan Teja Reddy timeout = MAX_SCHEDULE_TIMEOUT; 2960698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 2961facdaa91SNitin Gupta if (wait_event_freezable_timeout(pgdat->kcompactd_wait, 296265d759c8SCharan Teja Reddy kcompactd_work_requested(pgdat), timeout) && 296365d759c8SCharan Teja Reddy !pgdat->proactive_compact_trigger) { 2964698b1b30SVlastimil Babka 2965eb414681SJohannes Weiner psi_memstall_enter(&pflags); 2966698b1b30SVlastimil Babka kcompactd_do_work(pgdat); 2967eb414681SJohannes Weiner psi_memstall_leave(&pflags); 2968e1e92bfaSCharan Teja Reddy /* 2969e1e92bfaSCharan Teja Reddy * Reset the timeout value. The defer timeout from 2970e1e92bfaSCharan Teja Reddy * proactive compaction is lost here but that is fine, 2971e1e92bfaSCharan Teja Reddy * as the condition of the zone may have changed substantially, 2972e1e92bfaSCharan Teja Reddy * so carrying on with the previous defer interval is 2973e1e92bfaSCharan Teja Reddy * not useful. 2974e1e92bfaSCharan Teja Reddy */ 2975e1e92bfaSCharan Teja Reddy timeout = default_timeout; 2976facdaa91SNitin Gupta continue; 2977facdaa91SNitin Gupta } 2978facdaa91SNitin Gupta 2979e1e92bfaSCharan Teja Reddy /* 2980e1e92bfaSCharan Teja Reddy * Start the proactive work with default timeout. Based 2981e1e92bfaSCharan Teja Reddy * on the fragmentation score, this timeout is updated. 2982e1e92bfaSCharan Teja Reddy */ 2983e1e92bfaSCharan Teja Reddy timeout = default_timeout; 2984facdaa91SNitin Gupta if (should_proactive_compact_node(pgdat)) { 2985facdaa91SNitin Gupta unsigned int prev_score, score; 2986facdaa91SNitin Gupta 2987facdaa91SNitin Gupta prev_score = fragmentation_score_node(pgdat); 2988facdaa91SNitin Gupta proactive_compact_node(pgdat); 2989facdaa91SNitin Gupta score = fragmentation_score_node(pgdat); 2990facdaa91SNitin Gupta /* 2991facdaa91SNitin Gupta * Defer proactive compaction if the fragmentation 2992facdaa91SNitin Gupta * score did not go down, i.e. no progress was made. 2993facdaa91SNitin Gupta */ 2994e1e92bfaSCharan Teja Reddy if (unlikely(score >= prev_score)) 2995e1e92bfaSCharan Teja Reddy timeout = 2996e1e92bfaSCharan Teja Reddy default_timeout << COMPACT_MAX_DEFER_SHIFT; 2997facdaa91SNitin Gupta } 299865d759c8SCharan Teja Reddy if (unlikely(pgdat->proactive_compact_trigger)) 299965d759c8SCharan Teja Reddy pgdat->proactive_compact_trigger = false; 3000698b1b30SVlastimil Babka } 3001698b1b30SVlastimil Babka 3002698b1b30SVlastimil Babka return 0; 3003698b1b30SVlastimil Babka } 3004698b1b30SVlastimil Babka 3005698b1b30SVlastimil Babka /* 3006698b1b30SVlastimil Babka * This kcompactd start function will be called by init and node-hot-add. 3007698b1b30SVlastimil Babka * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
3008698b1b30SVlastimil Babka */ 3009024c61eaSMiaohe Lin void kcompactd_run(int nid) 3010698b1b30SVlastimil Babka { 3011698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 3012698b1b30SVlastimil Babka 3013698b1b30SVlastimil Babka if (pgdat->kcompactd) 3014024c61eaSMiaohe Lin return; 3015698b1b30SVlastimil Babka 3016698b1b30SVlastimil Babka pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 3017698b1b30SVlastimil Babka if (IS_ERR(pgdat->kcompactd)) { 3018698b1b30SVlastimil Babka pr_err("Failed to start kcompactd on node %d\n", nid); 3019698b1b30SVlastimil Babka pgdat->kcompactd = NULL; 3020698b1b30SVlastimil Babka } 3021698b1b30SVlastimil Babka } 3022698b1b30SVlastimil Babka 3023698b1b30SVlastimil Babka /* 3024698b1b30SVlastimil Babka * Called by memory hotplug when all memory in a node is offlined. Caller must 3025e8da368aSYun-Ze Li * be holding mem_hotplug_begin/done(). 3026698b1b30SVlastimil Babka */ 3027698b1b30SVlastimil Babka void kcompactd_stop(int nid) 3028698b1b30SVlastimil Babka { 3029698b1b30SVlastimil Babka struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 3030698b1b30SVlastimil Babka 3031698b1b30SVlastimil Babka if (kcompactd) { 3032698b1b30SVlastimil Babka kthread_stop(kcompactd); 3033698b1b30SVlastimil Babka NODE_DATA(nid)->kcompactd = NULL; 3034698b1b30SVlastimil Babka } 3035698b1b30SVlastimil Babka } 3036698b1b30SVlastimil Babka 3037698b1b30SVlastimil Babka /* 3038698b1b30SVlastimil Babka * It's optimal to keep kcompactd on the same CPUs as their memory, but 3039698b1b30SVlastimil Babka * not required for correctness. So if the last cpu in a node goes 3040698b1b30SVlastimil Babka * away, we get changed to run anywhere: as the first one comes back, 3041698b1b30SVlastimil Babka * restore their cpu bindings. 
3042698b1b30SVlastimil Babka */ 3043e46b1db2SAnna-Maria Gleixner static int kcompactd_cpu_online(unsigned int cpu) 3044698b1b30SVlastimil Babka { 3045698b1b30SVlastimil Babka int nid; 3046698b1b30SVlastimil Babka 3047698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) { 3048698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 3049698b1b30SVlastimil Babka const struct cpumask *mask; 3050698b1b30SVlastimil Babka 3051698b1b30SVlastimil Babka mask = cpumask_of_node(pgdat->node_id); 3052698b1b30SVlastimil Babka 3053698b1b30SVlastimil Babka if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3054698b1b30SVlastimil Babka /* One of our CPUs online: restore mask */ 30553109de30SMiaohe Lin if (pgdat->kcompactd) 3056698b1b30SVlastimil Babka set_cpus_allowed_ptr(pgdat->kcompactd, mask); 3057698b1b30SVlastimil Babka } 3058e46b1db2SAnna-Maria Gleixner return 0; 3059698b1b30SVlastimil Babka } 3060698b1b30SVlastimil Babka 3061698b1b30SVlastimil Babka static int __init kcompactd_init(void) 3062698b1b30SVlastimil Babka { 3063698b1b30SVlastimil Babka int nid; 3064e46b1db2SAnna-Maria Gleixner int ret; 3065e46b1db2SAnna-Maria Gleixner 3066e46b1db2SAnna-Maria Gleixner ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 3067e46b1db2SAnna-Maria Gleixner "mm/compaction:online", 3068e46b1db2SAnna-Maria Gleixner kcompactd_cpu_online, NULL); 3069e46b1db2SAnna-Maria Gleixner if (ret < 0) { 3070e46b1db2SAnna-Maria Gleixner pr_err("kcompactd: failed to register hotplug callbacks.\n"); 3071e46b1db2SAnna-Maria Gleixner return ret; 3072e46b1db2SAnna-Maria Gleixner } 3073698b1b30SVlastimil Babka 3074698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) 3075698b1b30SVlastimil Babka kcompactd_run(nid); 3076698b1b30SVlastimil Babka return 0; 3077698b1b30SVlastimil Babka } 3078698b1b30SVlastimil Babka subsys_initcall(kcompactd_init) 3079698b1b30SVlastimil Babka 3080ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 3081